remove "key-generator" node type and client support
closes ticket:2783
parent c715e0d839
commit d1d988410b
@@ -69,8 +69,7 @@ The item descriptions below use the following types:
 Node Types
 ==========
 
-A node can be a client/server, an introducer, a statistics gatherer, or a
-key generator.
+A node can be a client/server, an introducer, or a statistics gatherer.
 
 Client/server nodes provide one or more of the following services:
 
@@ -335,12 +334,6 @@ Client Configuration
 If provided, the node will attempt to connect to and use the given helper
 for uploads. See :doc:`helper` for details.
 
-``key_generator.furl = (FURL string, optional)``
-
-If provided, the node will attempt to connect to and use the given
-key-generator service, using RSA keys from the external process rather
-than generating its own.
-
 ``stats_gatherer.furl = (FURL string, optional)``
 
 If provided, the node will connect to the given stats gatherer and
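For context, the ``[client]`` FURL settings that survive this change look like the following in ``tahoe.cfg``. The FURL values are placeholders rather than real grants, and ``key_generator.furl`` is no longer honored (the client only logs a warning when it sees it):

    [client]
    introducer.furl = pb://EXAMPLE@tcp:introducer.example.org:12345/introducer
    helper.furl = pb://EXAMPLE@tcp:helper.example.org:12345/helper
    stats_gatherer.furl = pb://EXAMPLE@tcp:stats.example.org:12345/gatherer
    # key_generator.furl = ...   (ignored after this change; a warning is logged)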
@@ -612,11 +605,6 @@ This section describes these other files.
 This file is used to construct an introducer, and is created by the
 "``tahoe create-introducer``" command.
 
-``tahoe-key-generator.tac``
-
-This file is used to construct a key generator, and is created by the
-"``tahoe create-key-gernerator``" command.
-
 ``tahoe-stats-gatherer.tac``
 
 This file is used to construct a statistics gatherer, and is created by the
@@ -105,14 +105,6 @@ This node provides introduction services and nothing else. When started, this
 node will produce a ``private/introducer.furl`` file, which should be
 published to all clients.
 
-"``tahoe create-key-generator [NODEDIR]``" is used to create a special
-"key-generation" service, which allows a client to offload their RSA key
-generation to a separate process. Since RSA key generation takes several
-seconds, and must be done each time a directory is created, moving it to a
-separate process allows the first process (perhaps a busy web-API server) to
-continue servicing other requests. The key generator exports a FURL that can
-be copied into a node to enable this functionality.
-
 "``tahoe run [NODEDIR]``" will start a previously-created node in the foreground.
 
 "``tahoe start [NODEDIR]``" will launch a previously-created node. It will
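The paragraph removed above was the last prose description of how that offloading worked: the external process published a FURL, and clients fetched ready-made key pairs over foolscap. A minimal sketch of that (now removed) client-side exchange, pieced together from the code deleted later in this commit; the FURL is a placeholder and the snippet is illustrative, not part of the current codebase:

    from foolscap.api import Tub
    from twisted.internet import reactor
    from pycryptopp.publickey import rsa

    KEYGEN_FURL = "pb://EXAMPLE@tcp:127.0.0.1:3456/key_generator"  # placeholder

    def got_keygen(keygen):
        # ask the external process for a pre-generated 2048-bit pair
        d = keygen.callRemote('get_rsa_key_pair', 2048)
        def rebuild((verifying_s, signing_s)):
            # the service ships serialized keys; rebuild real key objects
            v = rsa.create_verifying_key_from_string(verifying_s)
            s = rsa.create_signing_key_from_string(signing_s)
            return (v, s)
        d.addCallback(rebuild)
        return d

    tub = Tub()
    tub.startService()
    tub.connectTo(KEYGEN_FURL, got_keygen)
    reactor.run()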
@@ -46,9 +46,6 @@ Create a client node (with storage initially disabled).
 .B \f[B]create-introducer\f[]
 Create an introducer node.
 .TP
-.B \f[B]create-key-generator\f[]
-Create a key generator service.
-.TP
 .B \f[B]create-stats-gatherer\f[]
 Create a stats-gatherer service.
 .SS OPTIONS
@@ -58,11 +58,8 @@ class KeyGenerator:
     to generate(), then with a default set by set_default_keysize(), then
     with a built-in default of 2048 bits."""
     def __init__(self):
-        self._remote = None
         self.default_keysize = 2048
 
-    def set_remote_generator(self, keygen):
-        self._remote = keygen
     def set_default_keysize(self, keysize):
         """Call this to override the size of the RSA keys created for new
         mutable files which don't otherwise specify a size. This will affect
@@ -80,15 +77,6 @@ class KeyGenerator:
         set_default_keysize() has never been called, I will create 2048 bit
         keys."""
         keysize = keysize or self.default_keysize
-        if self._remote:
-            d = self._remote.callRemote('get_rsa_key_pair', keysize)
-            def make_key_objs((verifying_key, signing_key)):
-                v = rsa.create_verifying_key_from_string(verifying_key)
-                s = rsa.create_signing_key_from_string(signing_key)
-                return v, s
-            d.addCallback(make_key_objs)
-            return d
-        else:
         # RSA key generation for a 2048 bit key takes between 0.8 and 3.2
         # secs
         signer = rsa.generate(keysize)
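With the remote branch gone, the generate() path shown above is purely local. A self-contained sketch of roughly what remains; the Deferred wrapper is an assumption (the removed remote path returned one, so the local path presumably does too), and the exact line layout is inferred rather than copied from the file:

    from twisted.internet import defer
    from pycryptopp.publickey import rsa

    def generate(keysize=2048):
        # RSA key generation for a 2048-bit key takes on the order of a second
        signer = rsa.generate(keysize)
        verifier = signer.get_verifying_key()
        # hand the (verifying, signing) pair back as an already-fired Deferred
        return defer.succeed((verifier, signer))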
@@ -145,7 +133,7 @@ class Client(node.Node, pollmixin.PollMixin):
         self._key_generator = KeyGenerator()
         key_gen_furl = self.get_config("client", "key_generator.furl", None)
         if key_gen_furl:
-            self.init_key_gen(key_gen_furl)
+            log.msg("[client]key_generator.furl= is now ignored, see #2783")
         self.init_client()
         self.helper = None
         if self.get_config("helper", "enabled", False, boolean=True):
@@ -442,16 +430,6 @@ class Client(node.Node, pollmixin.PollMixin):
                 "private", "helper.furl").encode(get_filesystem_encoding())
         self.tub.registerReference(self.helper, furlFile=helper_furlfile)
 
-    def init_key_gen(self, key_gen_furl):
-        self.tub.connectTo(key_gen_furl, self._got_key_generator)
-
-    def _got_key_generator(self, key_generator):
-        self._key_generator.set_remote_generator(key_generator)
-        key_generator.notifyOnDisconnect(self._lost_key_generator)
-
-    def _lost_key_generator(self):
-        self._key_generator.set_remote_generator(None)
-
     def set_default_mutable_keysize(self, keysize):
         self._key_generator.set_default_keysize(keysize)
 
@@ -2808,20 +2808,6 @@ class IStatsProducer(Interface):
     to be monitored, and numeric values.
     """
 
-class RIKeyGenerator(RemoteInterface):
-    __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
-    """
-    Provides a service offering to make RSA key pairs.
-    """
-
-    def get_rsa_key_pair(key_size=int):
-        """
-        @param key_size: the size of the signature key.
-        @return: tuple(verifying_key, signing_key)
-        """
-        return TupleOf(str, str)
-
-
 class FileTooLargeError(Exception):
     pass
 
@@ -1,111 +0,0 @@
-
-import os
-import time
-
-from foolscap.api import Referenceable, Tub
-from zope.interface import implements
-from twisted.internet import reactor
-from twisted.application import service
-from allmydata.util import log, fileutil
-
-from pycryptopp.publickey import rsa
-from allmydata.interfaces import RIKeyGenerator
-
-class KeyGenerator(service.MultiService, Referenceable):
-    implements(RIKeyGenerator)
-
-    pool_size = 16 # no. keys to keep on hand in the pool
-    pool_refresh_delay = 6 # no. sec to wait after a fetch before generating new keys
-    verbose = False
-
-    def __init__(self, default_key_size=2048):
-        service.MultiService.__init__(self)
-        self.keypool = []
-        self.last_fetch = 0
-        self.default_key_size = default_key_size
-
-    def startService(self):
-        self.timer = reactor.callLater(0, self.maybe_refill_pool)
-        return service.MultiService.startService(self)
-
-    def stopService(self):
-        if self.timer.active():
-            self.timer.cancel()
-        return service.MultiService.stopService(self)
-
-    def __repr__(self):
-        return '<KeyGenerator[%s]>' % (len(self.keypool),)
-
-    def vlog(self, msg):
-        if self.verbose:
-            log.msg(msg)
-
-    def reset_timer(self):
-        self.last_fetch = time.time()
-        if self.timer.active():
-            self.timer.reset(self.pool_refresh_delay)
-        else:
-            self.timer = reactor.callLater(self.pool_refresh_delay, self.maybe_refill_pool)
-
-    def maybe_refill_pool(self):
-        now = time.time()
-        if self.last_fetch + self.pool_refresh_delay < now:
-            self.vlog('%s refilling pool' % (self,))
-            while len(self.keypool) < self.pool_size:
-                self.keypool.append(self.gen_key(self.default_key_size))
-        else:
-            self.vlog('%s not refilling pool' % (self,))
-        reactor.callLater(1, self.maybe_refill_pool)
-
-    def gen_key(self, key_size):
-        self.vlog('%s generating key size %s' % (self, key_size, ))
-        signer = rsa.generate(key_size)
-        verifier = signer.get_verifying_key()
-        return verifier.serialize(), signer.serialize()
-
-    def remote_get_rsa_key_pair(self, key_size):
-        self.vlog('%s remote_get_key' % (self,))
-        if key_size != self.default_key_size or not self.keypool:
-            key = self.gen_key(key_size)
-            self.reset_timer()
-            return key
-        else:
-            self.reset_timer()
-            return self.keypool.pop()
-
-class KeyGeneratorService(service.MultiService):
-    furl_file = 'key_generator.furl'
-
-    def __init__(self, basedir='.', display_furl=True, default_key_size=2048):
-        service.MultiService.__init__(self)
-        self.basedir = basedir
-        fileutil.make_dirs(self.basedir)
-        self.tub = Tub(certFile=os.path.join(self.basedir, 'key_generator.pem'))
-        self.tub.setOption("expose-remote-exception-types", False)
-        self.tub.setServiceParent(self)
-        self.key_generator = KeyGenerator(default_key_size=default_key_size)
-        self.key_generator.setServiceParent(self)
-
-        portnum = self.get_portnum()
-        self.listener = self.tub.listenOn(portnum or 'tcp:0')
-        d = self.tub.setLocationAutomatically()
-        if portnum is None:
-            d.addCallback(self.save_portnum)
-        d.addCallback(self.tub_ready, display_furl)
-        d.addErrback(log.err)
-
-    def get_portnum(self):
-        portnumfile = os.path.join(self.basedir, 'portnum')
-        if os.path.exists(portnumfile):
-            return file(portnumfile, 'rb').read().strip()
-
-    def save_portnum(self, junk):
-        portnum = self.listener.getPortnum()
-        portnumfile = os.path.join(self.basedir, 'portnum')
-        file(portnumfile, 'wb').write('%d\n' % (portnum,))
-
-    def tub_ready(self, junk, display_furl):
-        kgf = os.path.join(self.basedir, self.furl_file)
-        self.keygen_furl = self.tub.registerReference(self.key_generator, furlFile=kgf)
-        if display_furl:
-            print 'key generator at:', self.keygen_furl
@@ -109,7 +109,6 @@ def create_node(config, out=sys.stdout, err=sys.stderr):
     c.write("# Which services should this client connect to?\n")
     c.write("introducer.furl = %s\n" % config.get("introducer", ""))
     c.write("helper.furl =\n")
-    c.write("#key_generator.furl =\n")
     c.write("#stats_gatherer.furl =\n")
    c.write("\n")
     c.write("# Encoding parameters this client will use for newly-uploaded files\n")
@@ -1,38 +0,0 @@
-
-import os, sys
-
-from allmydata.scripts.common import NoDefaultBasedirOptions
-from allmydata.scripts.create_node import write_tac
-from allmydata.util.assertutil import precondition
-from allmydata.util.encodingutil import listdir_unicode, quote_output
-
-
-class CreateKeyGeneratorOptions(NoDefaultBasedirOptions):
-    subcommand_name = "create-key-generator"
-
-
-def create_key_generator(config, out=sys.stdout, err=sys.stderr):
-    basedir = config['basedir']
-    # This should always be called with an absolute Unicode basedir.
-    precondition(isinstance(basedir, unicode), basedir)
-
-    if os.path.exists(basedir):
-        if listdir_unicode(basedir):
-            print >>err, "The base directory %s is not empty." % quote_output(basedir)
-            print >>err, "To avoid clobbering anything, I am going to quit now."
-            print >>err, "Please use a different directory, or empty this one."
-            return -1
-        # we're willing to use an empty directory
-    else:
-        os.mkdir(basedir)
-    write_tac(basedir, "key-generator")
-    return 0
-
-subCommands = [
-    ["create-key-generator", None, CreateKeyGeneratorOptions, "Create a key generator service."],
-]
-
-dispatch = {
-    "create-key-generator": create_key_generator,
-}
-
@@ -5,7 +5,7 @@ from cStringIO import StringIO
 from twisted.python import usage
 
 from allmydata.scripts.common import get_default_nodedir
-from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, admin
+from allmydata.scripts import debug, create_node, startstop_node, cli, stats_gatherer, admin
 from allmydata.util.encodingutil import quote_output, quote_local_unicode_path, get_io_encoding
 
 def GROUP(s):
@@ -36,7 +36,6 @@ class Options(usage.Options):
     synopsis = "\nUsage: tahoe <command> [command options]"
     subCommands = ( GROUP("Administration")
                     + create_node.subCommands
-                    + keygen.subCommands
                     + stats_gatherer.subCommands
                     + admin.subCommands
                     + GROUP("Controlling a node")
@@ -85,7 +84,7 @@ class Options(usage.Options):
 
 
 create_dispatch = {}
-for module in (create_node, keygen, stats_gatherer):
+for module in (create_node, stats_gatherer):
     create_dispatch.update(module.dispatch)
 
 def runner(argv,
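This dispatch table is the only wiring a subcommand module needs, which is why deleting keygen.py plus the two references above is enough to retire the command. A simplified sketch of the pattern (not the full runner module):

    from allmydata.scripts import create_node, stats_gatherer

    # each scripts module exposes a dispatch dict: {command-name: callable}
    create_dispatch = {}
    for module in (create_node, stats_gatherer):   # keygen is gone from the tuple
        create_dispatch.update(module.dispatch)

    # "tahoe create-key-generator" no longer has an entry here, so the option
    # parser treats it as an unknown command.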
@@ -80,8 +80,7 @@ class StartTahoeNodePlugin:
             from allmydata.introducer.server import IntroducerNode
             return IntroducerNode(self.basedir)
         if self.nodetype == "key-generator":
-            from allmydata.key_generator import KeyGeneratorService
-            return KeyGeneratorService(default_key_size=2048)
+            raise ValueError("key-generator support removed, see #2783")
         if self.nodetype == "stats-gatherer":
             from allmydata.stats import StatsGathererService
             return StatsGathererService(verbose=True)
@@ -22,7 +22,6 @@ from allmydata.util import hashutil, log, fileutil, pollmixin, iputil
 from allmydata.util.assertutil import precondition
 from allmydata.util.consumer import download_to_data
 from allmydata.stats import StatsGathererService
-from allmydata.key_generator import KeyGeneratorService
 import allmydata.test.common_util as testutil
 from allmydata import immutable
 
@@ -448,8 +447,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
 
         self.stats_gatherer = None
         self.stats_gatherer_furl = None
-        self.key_generator_svc = None
-        self.key_generator_furl = None
 
     def tearDown(self):
         log.msg("shutting down SystemTest services")
@@ -464,8 +461,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
         s.setServiceParent(self.sparent)
         return s
 
-    def set_up_nodes(self, NUMCLIENTS=5,
-                     use_stats_gatherer=False, use_key_generator=False):
+    def set_up_nodes(self, NUMCLIENTS=5, use_stats_gatherer=False):
         self.numclients = NUMCLIENTS
         iv_dir = self.getdir("introducer")
         if not os.path.isdir(iv_dir):
@@ -485,8 +481,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
         d = defer.succeed(None)
         if use_stats_gatherer:
             d.addCallback(self._set_up_stats_gatherer)
-        if use_key_generator:
-            d.addCallback(self._set_up_key_generator)
         d.addCallback(self._set_up_nodes_2)
         if use_stats_gatherer:
             d.addCallback(self._grab_stats)
@@ -514,27 +508,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
         d.addCallback(get_furl)
         return d
 
-    def _set_up_key_generator(self, res):
-        kgsdir = self.getdir("key_generator")
-        fileutil.make_dirs(kgsdir)
-
-        self.key_generator_svc = KeyGeneratorService(kgsdir,
-                                                     display_furl=False,
-                                                     default_key_size=TEST_RSA_KEY_SIZE)
-        self.key_generator_svc.key_generator.pool_size = 4
-        self.key_generator_svc.key_generator.pool_refresh_delay = 60
-        self.add_service(self.key_generator_svc)
-
-        d = fireEventually()
-        def check_for_furl():
-            return os.path.exists(os.path.join(kgsdir, 'key_generator.furl'))
-        d.addCallback(lambda junk: self.poll(check_for_furl, timeout=30))
-        def get_furl(junk):
-            kgf = os.path.join(kgsdir, 'key_generator.furl')
-            self.key_generator_furl = file(kgf, 'rb').read().strip()
-        d.addCallback(get_furl)
-        return d
-
     def _set_up_nodes_2(self, res):
         q = self.introducer
         self.introducer_furl = q.introducer_url
@@ -563,17 +536,14 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
             nodeconfig += "tub.location = tcp:127.0.0.1:%d\n" % tub_port
 
             if i == 0:
-                # clients[0] runs a webserver and a helper, no key_generator
+                # clients[0] runs a webserver and a helper
                 config += nodeconfig
                 config += "web.port = tcp:0:interface=127.0.0.1\n"
                 config += "timeout.keepalive = 600\n"
                 config += "[helper]\n"
                 config += "enabled = True\n"
             elif i == 3:
-                # clients[3] runs a webserver and uses a helper, uses
-                # key_generator
-                if self.key_generator_furl:
-                    config += "key_generator.furl = %s\n" % self.key_generator_furl
+                # clients[3] runs a webserver and uses a helper
                 config += nodeconfig
                 config += "web.port = tcp:0:interface=127.0.0.1\n"
                 config += "timeout.disconnect = 1800\n"
@@ -10,8 +10,8 @@
 
 # This should be useful for tests which want to examine and/or manipulate the
 # uploaded shares, checker/verifier/repairer tests, etc. The clients have no
-# Tubs, so it is not useful for tests that involve a Helper, a KeyGenerator,
-# or the control.furl .
+# Tubs, so it is not useful for tests that involve a Helper or the
+# control.furl .
 
 import os
 from zope.interface import implements
@@ -17,10 +17,10 @@ import allmydata.scripts.common_http
 from pycryptopp.publickey import ed25519
 
 # Test that the scripts can be imported.
-from allmydata.scripts import create_node, debug, keygen, startstop_node, \
+from allmydata.scripts import create_node, debug, startstop_node, \
     tahoe_add_alias, tahoe_backup, tahoe_check, tahoe_cp, tahoe_get, tahoe_ls, \
     tahoe_manifest, tahoe_mkdir, tahoe_mv, tahoe_put, tahoe_unlink, tahoe_webopen
-_hush_pyflakes = [create_node, debug, keygen, startstop_node,
+_hush_pyflakes = [create_node, debug, startstop_node,
     tahoe_add_alias, tahoe_backup, tahoe_check, tahoe_cp, tahoe_get, tahoe_ls,
     tahoe_manifest, tahoe_mkdir, tahoe_mv, tahoe_put, tahoe_unlink, tahoe_webopen]
 
@@ -1,100 +0,0 @@
-
-import os
-from twisted.trial import unittest
-from twisted.application import service
-
-from foolscap.api import Tub, fireEventually, flushEventualQueue
-
-from allmydata import key_generator
-from allmydata.util import pollmixin
-from allmydata.test.common import TEST_RSA_KEY_SIZE
-from pycryptopp.publickey import rsa
-
-def flush_but_dont_ignore(res):
-    d = flushEventualQueue()
-    def _done(ignored):
-        return res
-    d.addCallback(_done)
-    return d
-
-class KeyGenService(unittest.TestCase, pollmixin.PollMixin):
-    def setUp(self):
-        self.parent = service.MultiService()
-        self.parent.startService()
-
-        self.tub = t = Tub()
-        t.setOption("expose-remote-exception-types", False)
-        t.setServiceParent(self.parent)
-        t.listenOn("tcp:0")
-        t.setLocationAutomatically()
-        return fireEventually()
-
-    def tearDown(self):
-        d = self.parent.stopService()
-        d.addCallback(fireEventually)
-        d.addBoth(flush_but_dont_ignore)
-        return d
-
-    def test_key_gen_service(self):
-        def p(junk, msg):
-            #import time
-            #print time.asctime(), msg
-            return junk
-
-        #print 'starting key generator service'
-        keysize = TEST_RSA_KEY_SIZE
-        kgs = key_generator.KeyGeneratorService(display_furl=False, default_key_size=keysize, basedir="key_generator_service")
-        kgs.key_generator.verbose = True
-        kgs.setServiceParent(self.parent)
-        kgs.key_generator.pool_size = 8
-
-        def keypool_full():
-            return len(kgs.key_generator.keypool) == kgs.key_generator.pool_size
-
-        # first wait for key gen pool to fill up
-        d = fireEventually()
-        d.addCallback(p, 'waiting for pool to fill up')
-        d.addCallback(lambda junk: self.poll(keypool_full))
-
-        d.addCallback(p, 'grabbing a few keys')
-        # grab a few keys, check that pool size shrinks
-        def get_key(junk=None):
-            d = self.tub.getReference(kgs.keygen_furl)
-            d.addCallback(lambda kg: kg.callRemote('get_rsa_key_pair', keysize))
-            return d
-
-        def check_poolsize(junk, size):
-            self.failUnlessEqual(len(kgs.key_generator.keypool), size)
-
-        n_keys_to_waste = 4
-        for i in range(n_keys_to_waste):
-            d.addCallback(get_key)
-        d.addCallback(check_poolsize, kgs.key_generator.pool_size - n_keys_to_waste)
-
-        d.addCallback(p, 'checking a key works')
-        # check that a retrieved key is actually useful
-        d.addCallback(get_key)
-        def check_key_works(keys):
-            verifying_key, signing_key = keys
-            v = rsa.create_verifying_key_from_string(verifying_key)
-            s = rsa.create_signing_key_from_string(signing_key)
-            junk = os.urandom(42)
-            sig = s.sign(junk)
-            self.failUnless(v.verify(junk, sig))
-        d.addCallback(check_key_works)
-
-        d.addCallback(p, 'checking pool exhaustion')
-        # exhaust the pool
-        for i in range(kgs.key_generator.pool_size):
-            d.addCallback(get_key)
-        d.addCallback(check_poolsize, 0)
-
-        # and check it still works (will gen key synchronously on demand)
-        d.addCallback(get_key)
-        d.addCallback(check_key_works)
-
-        d.addCallback(p, 'checking pool replenishment')
-        # check that the pool will refill
-        d.addCallback(lambda junk: self.poll(keypool_full))
-
-        return d
@@ -281,9 +281,6 @@ class CreateNode(unittest.TestCase):
     def test_introducer(self):
         self.do_create("introducer")
 
-    def test_key_generator(self):
-        self.do_create("key-generator")
-
     def test_stats_gatherer(self):
         self.do_create("stats-gatherer")
 
@@ -651,83 +648,3 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin,
             self.failUnlessIn("does not look like a directory at all", err)
         d.addCallback(_cb3)
         return d
-
-    def test_keygen(self):
-        self.skip_if_cannot_daemonize()
-
-        basedir = self.workdir("test_keygen")
-        c1 = os.path.join(basedir, "c1")
-        twistd_pid_file = os.path.join(c1, "twistd.pid")
-        keygen_furl_file = os.path.join(c1, "key_generator.furl")
-
-        d = self.run_bintahoe(["--quiet", "create-key-generator", "--basedir", c1])
-        def _cb(res):
-            out, err, rc_or_sig = res
-            self.failUnlessEqual(rc_or_sig, 0)
-        d.addCallback(_cb)
-
-        def _start(res):
-            return self.run_bintahoe(["--quiet", "start", c1])
-        d.addCallback(_start)
-
-        def _cb2(res):
-            out, err, rc_or_sig = res
-            errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
-            self.failUnlessEqual(rc_or_sig, 0, errstr)
-            self.failUnlessEqual(out, "", errstr)
-            # self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
-
-            # the parent (twistd) has exited. However, twistd writes the pid
-            # from the child, not the parent, so we can't expect twistd.pid
-            # to exist quite yet.
-
-            # the node is running, but it might not have made it past the
-            # first reactor turn yet, and if we kill it too early, it won't
-            # remove the twistd.pid file. So wait until it does something
-            # that we know it won't do until after the first turn.
-        d.addCallback(_cb2)
-
-        def _node_has_started():
-            return os.path.exists(keygen_furl_file)
-        d.addCallback(lambda res: self.poll(_node_has_started))
-
-        def _started(res):
-            self.failUnless(os.path.exists(twistd_pid_file))
-            # rm this so we can detect when the second incarnation is ready
-            os.unlink(keygen_furl_file)
-            return self.run_bintahoe(["--quiet", "restart", c1])
-        d.addCallback(_started)
-
-        def _cb3(res):
-            out, err, rc_or_sig = res
-            errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
-            self.failUnlessEqual(rc_or_sig, 0, errstr)
-            self.failUnlessEqual(out, "", errstr)
-            # self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
-        d.addCallback(_cb3)
-
-        # again, the second incarnation of the node might not be ready yet,
-        # so poll until it is
-        d.addCallback(lambda res: self.poll(_node_has_started))
-
-        # now we can kill it. TODO: On a slow machine, the node might kill
-        # itself before we get a chance too, especially if spawning the
-        # 'tahoe stop' command takes a while.
-        def _stop(res):
-            self.failUnless(os.path.exists(twistd_pid_file))
-            return self.run_bintahoe(["--quiet", "stop", c1])
-        d.addCallback(_stop)
-
-        def _cb4(res):
-            out, err, rc_or_sig = res
-            # the parent has exited by now
-            errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
-            self.failUnlessEqual(rc_or_sig, 0, errstr)
-            self.failUnlessEqual(out, "", errstr)
-            # self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
-            # the parent was supposed to poll and wait until it sees
-            # twistd.pid go away before it exits, so twistd.pid should be
-            # gone by now.
-            self.failIf(os.path.exists(twistd_pid_file))
-        d.addCallback(_cb4)
-        return d
@@ -475,7 +475,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         NEWERDATA = "this is getting old"
         NEWERDATA_uploadable = MutableData(NEWERDATA)
 
-        d = self.set_up_nodes(use_key_generator=True)
+        d = self.set_up_nodes()
 
         def _create_mutable(res):
             c = self.clients[0]
@@ -673,25 +673,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
             return d1
         d.addCallback(_created_dirnode)
 
-        def wait_for_c3_kg_conn():
-            return self.clients[3]._key_generator is not None
-        d.addCallback(lambda junk: self.poll(wait_for_c3_kg_conn))
-
-        def check_kg_poolsize(junk, size_delta):
-            self.failUnlessEqual(len(self.key_generator_svc.key_generator.keypool),
-                                 self.key_generator_svc.key_generator.pool_size + size_delta)
-
-        d.addCallback(check_kg_poolsize, 0)
-        d.addCallback(lambda junk:
-                      self.clients[3].create_mutable_file(MutableData('hello, world')))
-        d.addCallback(check_kg_poolsize, -1)
-        d.addCallback(lambda junk: self.clients[3].create_dirnode())
-        d.addCallback(check_kg_poolsize, -2)
-        # use_helper induces use of clients[3], which is the using-key_gen client
-        d.addCallback(lambda junk:
-                      self.POST("uri?t=mkdir&name=george", use_helper=True))
-        d.addCallback(check_kg_poolsize, -3)
-
         return d
 
     def flip_bit(self, good):
@@ -0,0 +1,10 @@
+The "key-generator" node type has been removed. This was a standalone process
+that maintained a queue of RSA keys. Clients could offload the key-generation
+work by adding "key_generator.furl=" in their tahoe.cfg files, to create
+mutable files and directories faster. This seemed important back in 2006, but
+these days computers are faster and RSA key generation only takes about 90ms.
+
+This removes the "tahoe create-key-generator" command. Any
+"key_generator.furl" settings in tahoe.cfg will log a warning and otherwise be
+ignored. Attempts to "tahoe start" a previously-generated key-generator node
+will result in an error.
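The 90ms figure above is easy to sanity-check with the same pycryptopp call the client now makes in-process. This is a hypothetical measurement snippet, not part of the commit, and the observed time will vary by machine and key size:

    import time
    from pycryptopp.publickey import rsa

    start = time.time()
    signer = rsa.generate(2048)              # same call used by KeyGenerator.generate()
    verifier = signer.get_verifying_key()
    print "2048-bit RSA keypair in %.0f ms" % ((time.time() - start) * 1000.0)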