replaced all remaining instances of the print statement with the print function #582

Merged
heartsucker merged 1 commit from 3010-remaining-print-functions into master 2019-03-28 02:54:50 +00:00
20 changed files with 292 additions and 267 deletions

newsfragments/3010.other (new file, +1)

@@ -0,0 +1 @@
+Replaced all remaining instances of the print statement with the print function.
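
The rewrite is mechanical: every Python 2 print statement becomes a call to the print() builtin, with the ">>stream" redirection replaced by the file= keyword argument, and "from __future__ import print_function" added at the top of each touched module so the function form parses and behaves the same under Python 2. A minimal sketch of the three patterns that recur throughout the diff below (the "out" stream here is illustrative, not taken from the code):

    from __future__ import print_function  # makes print() a function on Python 2

    import sys

    out = sys.stderr  # stand-in for the various streams used in the diff

    # Statement form (before)      ->  function form (after):
    #   print >>out, "label:", 42  ->  print("label:", 42, file=out)
    #   print >>out                ->  print(file=out)
    #   print "hello"              ->  print("hello")
    print("label:", 42, file=out)  # '>>out' redirection becomes file=out
    print(file=out)                # a bare redirected print emits just a newline
    print("hello")                 # file defaults to sys.stdout

This is also why the first hunk of every file below adds the same one-line __future__ import.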

@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys, time, copy
 from zope.interface import implementer
@@ -181,19 +182,19 @@ class ServerMap:
         return (self._last_update_mode, self._last_update_time)

     def dump(self, out=sys.stdout):
-        print >>out, "servermap:"
+        print("servermap:", file=out)

         for ( (server, shnum), (verinfo, timestamp) ) in self._known_shares.items():
             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
              offsets_tuple) = verinfo
-            print >>out, ("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
-                          (server.get_name(), shnum,
-                           seqnum, base32.b2a(root_hash)[:4], k, N,
-                           datalength))
+            print("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
+                  (server.get_name(), shnum,
+                   seqnum, base32.b2a(root_hash)[:4], k, N,
+                   datalength), file=out)
         if self._problems:
-            print >>out, "%d PROBLEMS" % len(self._problems)
+            print("%d PROBLEMS" % len(self._problems), file=out)
             for f in self._problems:
-                print >>out, str(f)
+                print(str(f), file=out)
         return out

     def all_servers(self):

@@ -1,3 +1,4 @@
+from __future__ import print_function
 from twisted.python import usage
 from allmydata.scripts.common import BaseOptions
@@ -16,8 +17,8 @@ def print_keypair(options):
     from allmydata.util.keyutil import make_keypair
     out = options.stdout
     privkey_vs, pubkey_vs = make_keypair()
-    print >>out, "private:", privkey_vs
-    print >>out, "public:", pubkey_vs
+    print("private:", privkey_vs, file=out)
+    print("public:", pubkey_vs, file=out)

 class DerivePubkeyOptions(BaseOptions):
     def parseArgs(self, privkey):
@@ -40,8 +41,8 @@ def derive_pubkey(options):
     from allmydata.util import keyutil
     privkey_vs = options.privkey
     sk, pubkey_vs = keyutil.parse_privkey(privkey_vs)
-    print >>out, "private:", privkey_vs
-    print >>out, "public:", pubkey_vs
+    print("private:", privkey_vs, file=out)
+    print("public:", pubkey_vs, file=out)
     return 0

 class AdminCommand(BaseOptions):

@@ -1,3 +1,4 @@
+from __future__ import print_function

 import os.path, sys, time, random, stat
@@ -73,7 +74,7 @@ def get_backupdb(dbfile, stderr=sys.stderr,
                              just_create=just_create, dbname="backupdb")
         return BackupDB_v2(sqlite3, db)
     except DBError, e:
-        print >>stderr, e
+        print(e, file=stderr)
         return None

@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import os.path, re, fnmatch
 from twisted.python import usage
 from allmydata.scripts.common import get_aliases, get_default_nodedir, \
@@ -512,8 +514,8 @@ def get(options):
         # enough to have picked an empty file
         pass
     else:
-        print >>options.stderr, "%s retrieved and written to %s" % \
-              (options.from_file, options.to_file)
+        print("%s retrieved and written to %s" % \
+              (options.from_file, options.to_file), file=options.stderr)
     return rc

 def put(options):

@@ -1,3 +1,4 @@
+from __future__ import print_function

 import os, sys, urllib, textwrap
 import codecs
@@ -168,7 +169,7 @@ class TahoeError(Exception):
         self.msg = msg

     def display(self, err):
-        print >>err, self.msg
+        print(self.msg, file=err)

 class UnknownAliasError(TahoeError):

@@ -1,3 +1,4 @@
+from __future__ import print_function

 import os
 from cStringIO import StringIO
@@ -90,7 +91,7 @@ def format_http_error(msg, resp):

 def check_http_error(resp, stderr):
     if resp.status < 200 or resp.status >= 300:
-        print >>stderr, format_http_error("Error during HTTP request", resp)
+        print(format_http_error("Error during HTTP request", resp), file=stderr)
         return 1

@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import os
 import json
@@ -339,9 +341,9 @@ def write_client_config(c, config):

 @defer.inlineCallbacks
 def _get_config_via_wormhole(config):
     out = config.stdout
-    print >>out, "Opening wormhole with code '{}'".format(config['join'])
+    print("Opening wormhole with code '{}'".format(config['join']), file=out)
     relay_url = config.parent['wormhole-server']
-    print >>out, "Connecting to '{}'".format(relay_url)
+    print("Connecting to '{}'".format(relay_url), file=out)

     wh = wormhole.create(
         appid=config.parent['wormhole-invite-appid'],
@@ -351,7 +353,7 @@ def _get_config_via_wormhole(config):
     code = unicode(config['join'])
     wh.set_code(code)
     yield wh.get_welcome()
-    print >>out, "Connected to wormhole server"
+    print("Connected to wormhole server", file=out)

     intro = {
         u"abilities": {
@@ -363,14 +365,14 @@ def _get_config_via_wormhole(config):
     server_intro = yield wh.get_message()
     server_intro = json.loads(server_intro)

-    print >>out, " received server introduction"
+    print(" received server introduction", file=out)

     if u'abilities' not in server_intro:
         raise RuntimeError(" Expected 'abilities' in server introduction")
     if u'server-v1' not in server_intro['abilities']:
         raise RuntimeError(" Expected 'server-v1' in server abilities")

     remote_data = yield wh.get_message()
-    print >>out, " received configuration"
+    print(" received configuration", file=out)

     defer.returnValue(json.loads(remote_data))
@@ -384,9 +386,9 @@ def create_node(config):
     if os.path.exists(basedir):
         if listdir_unicode(basedir):
-            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
-            print >>err, "To avoid clobbering anything, I am going to quit now."
-            print >>err, "Please use a different directory, or empty this one."
+            print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err)
+            print("To avoid clobbering anything, I am going to quit now.", file=err)
+            print("Please use a different directory, or empty this one.", file=err)
             defer.returnValue(-1)
         # we're willing to use an empty directory
     else:
@@ -398,7 +400,7 @@ def create_node(config):
         try:
             remote_config = yield _get_config_via_wormhole(config)
         except RuntimeError as e:
-            print >>err, str(e)
+            print(str(e), file=err)
             defer.returnValue(1)

         # configuration we'll allow the inviter to set
@@ -408,8 +410,8 @@ def create_node(config):
         ]
         sensitive_keys = ['introducer']

-        print >>out, "Encoding: {shares-needed} of {shares-total} shares, on at least {shares-happy} servers".format(**remote_config)
-        print >>out, "Overriding the following config:"
+        print("Encoding: {shares-needed} of {shares-total} shares, on at least {shares-happy} servers".format(**remote_config), file=out)
+        print("Overriding the following config:", file=out)
         for k in whitelist:
             v = remote_config.get(k, None)
@@ -420,22 +422,22 @@ def create_node(config):
                 config[k] = v
                 if k not in sensitive_keys:
                     if k not in ['shares-happy', 'shares-total', 'shares-needed']:
-                        print >>out, " {}: {}".format(k, v)
+                        print(" {}: {}".format(k, v), file=out)
                 else:
-                    print >>out, " {}: [sensitive data; see tahoe.cfg]".format(k)
+                    print(" {}: [sensitive data; see tahoe.cfg]".format(k), file=out)

     fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
     with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
         yield write_node_config(c, config)
         write_client_config(c, config)

-    print >>out, "Node created in %s" % quote_local_unicode_path(basedir)
+    print("Node created in %s" % quote_local_unicode_path(basedir), file=out)
     tahoe_cfg = quote_local_unicode_path(os.path.join(basedir, "tahoe.cfg"))
     if not config.get("introducer", ""):
-        print >>out, " Please set [client]introducer.furl= in %s!" % tahoe_cfg
-        print >>out, " The node cannot connect to a grid without it."
+        print(" Please set [client]introducer.furl= in %s!" % tahoe_cfg, file=out)
+        print(" The node cannot connect to a grid without it.", file=out)
     if not config.get("nickname", ""):
-        print >>out, " Please set [node]nickname= in %s" % tahoe_cfg
+        print(" Please set [node]nickname= in %s" % tahoe_cfg, file=out)
     defer.returnValue(0)

 def create_client(config):
@@ -454,9 +456,9 @@ def create_introducer(config):
     if os.path.exists(basedir):
         if listdir_unicode(basedir):
-            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
-            print >>err, "To avoid clobbering anything, I am going to quit now."
-            print >>err, "Please use a different directory, or empty this one."
+            print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err)
+            print("To avoid clobbering anything, I am going to quit now.", file=err)
+            print("Please use a different directory, or empty this one.", file=err)
             defer.returnValue(-1)
         # we're willing to use an empty directory
     else:
@@ -467,7 +469,7 @@ def create_introducer(config):
     with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
         yield write_node_config(c, config)

-    print >>out, "Introducer created in %s" % quote_local_unicode_path(basedir)
+    print("Introducer created in %s" % quote_local_unicode_path(basedir), file=out)
     defer.returnValue(0)

@@ -1,3 +1,4 @@
+from __future__ import print_function
 # do not import any allmydata modules at this level. Do that from inside
 # individual functions instead.
@@ -37,7 +38,7 @@ def dump_share(options):
     out = options.stdout

     # check the version, to see if we have a mutable or immutable share
-    print >>out, "share filename: %s" % quote_output(options['filename'])
+    print("share filename: %s" % quote_output(options['filename']), file=out)

     f = open(options['filename'], "rb")
     prefix = f.read(32)
@@ -55,7 +56,7 @@ def dump_immutable_share(options):
     if not options["leases-only"]:
         dump_immutable_chk_share(f, out, options)
     dump_immutable_lease_info(f, out)
-    print >>out
+    print(file=out)
     return 0

 def dump_immutable_chk_share(f, out, options):
@@ -67,7 +68,7 @@ def dump_immutable_chk_share(f, out, options):
     # use a ReadBucketProxy to parse the bucket and find the uri extension
     bp = ReadBucketProxy(None, None, '')
     offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
-    print >>out, "%20s: %d" % ("version", bp._version)
+    print("%20s: %d" % ("version", bp._version), file=out)
     seek = offsets['uri_extension']
     length = struct.unpack(bp._fieldstruct,
                            f.read_share_data(seek, bp._fieldsize))[0]
@@ -85,24 +86,24 @@ def dump_immutable_chk_share(f, out, options):
     for k in keys1:
         if k in unpacked:
             dk = display_keys.get(k, k)
-            print >>out, "%20s: %s" % (dk, unpacked[k])
-    print >>out
+            print("%20s: %s" % (dk, unpacked[k]), file=out)
+    print(file=out)
     for k in keys2:
         if k in unpacked:
             dk = display_keys.get(k, k)
-            print >>out, "%20s: %s" % (dk, unpacked[k])
-    print >>out
+            print("%20s: %s" % (dk, unpacked[k]), file=out)
+    print(file=out)
     for k in keys3:
         if k in unpacked:
             dk = display_keys.get(k, k)
-            print >>out, "%20s: %s" % (dk, unpacked[k])
+            print("%20s: %s" % (dk, unpacked[k]), file=out)

     leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
     if leftover:
-        print >>out
-        print >>out, "LEFTOVER:"
+        print(file=out)
+        print("LEFTOVER:", file=out)
         for k in sorted(leftover):
-            print >>out, "%20s: %s" % (k, unpacked[k])
+            print("%20s: %s" % (k, unpacked[k]), file=out)

     # the storage index isn't stored in the share itself, so we depend upon
     # knowing the parent directory name to get it
@@ -116,7 +117,7 @@ def dump_immutable_chk_share(f, out, options):
                        unpacked["needed_shares"],
                        unpacked["total_shares"], unpacked["size"])
         verify_cap = u.to_string()
-        print >>out, "%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False))
+        print("%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False)), file=out)

     sizes = {}
     sizes['data'] = (offsets['plaintext_hash_tree'] -
@@ -124,33 +125,33 @@ def dump_immutable_chk_share(f, out, options):
     sizes['validation'] = (offsets['uri_extension'] -
                            offsets['plaintext_hash_tree'])
     sizes['uri-extension'] = len(UEB_data)
-    print >>out
-    print >>out, " Size of data within the share:"
+    print(file=out)
+    print(" Size of data within the share:", file=out)
     for k in sorted(sizes):
-        print >>out, "%20s: %s" % (k, sizes[k])
+        print("%20s: %s" % (k, sizes[k]), file=out)

     if options['offsets']:
-        print >>out
-        print >>out, " Section Offsets:"
-        print >>out, "%20s: %s" % ("share data", f._data_offset)
+        print(file=out)
+        print(" Section Offsets:", file=out)
+        print("%20s: %s" % ("share data", f._data_offset), file=out)
         for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                   "block_hashes", "share_hashes", "uri_extension"]:
             name = {"data": "block data"}.get(k,k)
             offset = f._data_offset + offsets[k]
-            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
-        print >>out, "%20s: %s" % ("leases", f._lease_offset)
+            print(" %20s: %s (0x%x)" % (name, offset, offset), file=out)
+        print("%20s: %s" % ("leases", f._lease_offset), file=out)

 def dump_immutable_lease_info(f, out):
     # display lease information too
-    print >>out
+    print(file=out)
     leases = list(f.get_leases())
     if leases:
         for i,lease in enumerate(leases):
             when = format_expiration_time(lease.expiration_time)
-            print >>out, " Lease #%d: owner=%d, expire in %s" \
-                  % (i, lease.owner_num, when)
+            print(" Lease #%d: owner=%d, expire in %s" \
+                  % (i, lease.owner_num, when), file=out)
     else:
-        print >>out, " No leases."
+        print(" No leases.", file=out)

 def format_expiration_time(expiration_time):
     now = time.time()
@@ -186,27 +187,27 @@ def dump_mutable_share(options):
         share_type = "MDMF"
     f.close()

-    print >>out
-    print >>out, "Mutable slot found:"
-    print >>out, " share_type: %s" % share_type
-    print >>out, " write_enabler: %s" % base32.b2a(WE)
-    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
-    print >>out, " num_extra_leases: %d" % num_extra_leases
-    print >>out, " container_size: %d" % container_size
-    print >>out, " data_length: %d" % data_length
+    print(file=out)
+    print("Mutable slot found:", file=out)
+    print(" share_type: %s" % share_type, file=out)
+    print(" write_enabler: %s" % base32.b2a(WE), file=out)
+    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
+    print(" num_extra_leases: %d" % num_extra_leases, file=out)
+    print(" container_size: %d" % container_size, file=out)
+    print(" data_length: %d" % data_length, file=out)
     if leases:
         for (leasenum, lease) in leases:
-            print >>out
-            print >>out, " Lease #%d:" % leasenum
-            print >>out, " ownerid: %d" % lease.owner_num
+            print(file=out)
+            print(" Lease #%d:" % leasenum, file=out)
+            print(" ownerid: %d" % lease.owner_num, file=out)
             when = format_expiration_time(lease.expiration_time)
-            print >>out, " expires in %s" % when
-            print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
-            print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
-            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
+            print(" expires in %s" % when, file=out)
+            print(" renew_secret: %s" % base32.b2a(lease.renew_secret), file=out)
+            print(" cancel_secret: %s" % base32.b2a(lease.cancel_secret), file=out)
+            print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out)
     else:
-        print >>out, "No leases."
-    print >>out
+        print("No leases.", file=out)
+    print(file=out)

     if share_type == "SDMF":
         dump_SDMF_share(m, data_length, options)
@@ -248,21 +249,21 @@ def dump_SDMF_share(m, length, options):
     (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
      ig_datalen, offsets) = unpack_header(data)

-    print >>out, " SDMF contents:"
-    print >>out, " seqnum: %d" % seqnum
-    print >>out, " root_hash: %s" % base32.b2a(root_hash)
-    print >>out, " IV: %s" % base32.b2a(IV)
-    print >>out, " required_shares: %d" % k
-    print >>out, " total_shares: %d" % N
-    print >>out, " segsize: %d" % segsize
-    print >>out, " datalen: %d" % datalen
-    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
-    print >>out, " pubkey: %d bytes" % len(pubkey)
-    print >>out, " signature: %d bytes" % len(signature)
+    print(" SDMF contents:", file=out)
+    print(" seqnum: %d" % seqnum, file=out)
+    print(" root_hash: %s" % base32.b2a(root_hash), file=out)
+    print(" IV: %s" % base32.b2a(IV), file=out)
+    print(" required_shares: %d" % k, file=out)
+    print(" total_shares: %d" % N, file=out)
+    print(" segsize: %d" % segsize, file=out)
+    print(" datalen: %d" % datalen, file=out)
+    print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
+    print(" pubkey: %d bytes" % len(pubkey), file=out)
+    print(" signature: %d bytes" % len(signature), file=out)
     share_hash_ids = ",".join(sorted([str(hid)
                                       for hid in share_hash_chain.keys()]))
-    print >>out, " share_hash_chain: %s" % share_hash_ids
-    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
+    print(" share_hash_chain: %s" % share_hash_ids, file=out)
+    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

     # the storage index isn't stored in the share itself, so we depend upon
     # knowing the parent directory name to get it
@@ -274,15 +275,15 @@ def dump_SDMF_share(m, length, options):
         fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
         u = SSKVerifierURI(storage_index, fingerprint)
         verify_cap = u.to_string()
-        print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
+        print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)

     if options['offsets']:
         # NOTE: this offset-calculation code is fragile, and needs to be
         # merged with MutableShareFile's internals.
-        print >>out
-        print >>out, " Section Offsets:"
+        print(file=out)
+        print(" Section Offsets:", file=out)
         def printoffset(name, value, shift=0):
-            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
+            print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
         printoffset("first lease", m.HEADER_SIZE)
         printoffset("share data", m.DATA_OFFSET)
         o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
@@ -300,7 +301,7 @@ def dump_SDMF_share(m, length, options):
         printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
         f.close()

-    print >>out
+    print(file=out)

 def dump_MDMF_share(m, length, options):
     from allmydata.mutable.layout import MDMFSlotReadProxy
@@ -342,21 +343,21 @@ def dump_MDMF_share(m, length, options):
     (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
      offsets) = verinfo

-    print >>out, " MDMF contents:"
-    print >>out, " seqnum: %d" % seqnum
-    print >>out, " root_hash: %s" % base32.b2a(root_hash)
+    print(" MDMF contents:", file=out)
+    print(" seqnum: %d" % seqnum, file=out)
+    print(" root_hash: %s" % base32.b2a(root_hash), file=out)
     #print >>out, " IV: %s" % base32.b2a(IV)
-    print >>out, " required_shares: %d" % k
-    print >>out, " total_shares: %d" % N
-    print >>out, " segsize: %d" % segsize
-    print >>out, " datalen: %d" % datalen
-    print >>out, " enc_privkey: %d bytes" % len(encprivkey)
-    print >>out, " pubkey: %d bytes" % len(pubkey)
-    print >>out, " signature: %d bytes" % len(signature)
+    print(" required_shares: %d" % k, file=out)
+    print(" total_shares: %d" % N, file=out)
+    print(" segsize: %d" % segsize, file=out)
+    print(" datalen: %d" % datalen, file=out)
+    print(" enc_privkey: %d bytes" % len(encprivkey), file=out)
+    print(" pubkey: %d bytes" % len(pubkey), file=out)
+    print(" signature: %d bytes" % len(signature), file=out)
     share_hash_ids = ",".join([str(hid)
                                for hid in sorted(share_hash_chain.keys())])
-    print >>out, " share_hash_chain: %s" % share_hash_ids
-    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
+    print(" share_hash_chain: %s" % share_hash_ids, file=out)
+    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

     # the storage index isn't stored in the share itself, so we depend upon
     # knowing the parent directory name to get it
@@ -368,16 +369,16 @@ def dump_MDMF_share(m, length, options):
         fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
         u = MDMFVerifierURI(storage_index, fingerprint)
         verify_cap = u.to_string()
-        print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
+        print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)

     if options['offsets']:
         # NOTE: this offset-calculation code is fragile, and needs to be
         # merged with MutableShareFile's internals.
-        print >>out
-        print >>out, " Section Offsets:"
+        print(file=out)
+        print(" Section Offsets:", file=out)
         def printoffset(name, value, shift=0):
-            print >>out, "%s%.20s: %s (0x%x)" % (" "*shift, name, value, value)
+            print("%s%.20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
         printoffset("first lease", m.HEADER_SIZE, 2)
         printoffset("share data", m.DATA_OFFSET, 2)
         o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
@@ -397,7 +398,7 @@ def dump_MDMF_share(m, length, options):
         printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
         f.close()

-    print >>out
+    print(file=out)
@@ -460,7 +461,7 @@ def dump_cap(options):

     u = uri.from_string(cap)
-    print >>out
+    print(file=out)
     dump_uri_instance(u, nodeid, secret, out)

 def _dump_secrets(storage_index, secret, nodeid, out):
@@ -469,19 +470,19 @@ def _dump_secrets(storage_index, secret, nodeid, out):
     if secret:
         crs = hashutil.my_renewal_secret_hash(secret)
-        print >>out, " client renewal secret:", base32.b2a(crs)
+        print(" client renewal secret:", base32.b2a(crs), file=out)
         frs = hashutil.file_renewal_secret_hash(crs, storage_index)
-        print >>out, " file renewal secret:", base32.b2a(frs)
+        print(" file renewal secret:", base32.b2a(frs), file=out)
         if nodeid:
             renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
-            print >>out, " lease renewal secret:", base32.b2a(renew)
+            print(" lease renewal secret:", base32.b2a(renew), file=out)
         ccs = hashutil.my_cancel_secret_hash(secret)
-        print >>out, " client cancel secret:", base32.b2a(ccs)
+        print(" client cancel secret:", base32.b2a(ccs), file=out)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
-        print >>out, " file cancel secret:", base32.b2a(fcs)
+        print(" file cancel secret:", base32.b2a(fcs), file=out)
         if nodeid:
             cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
-            print >>out, " lease cancel secret:", base32.b2a(cancel)
+            print(" lease cancel secret:", base32.b2a(cancel), file=out)

 def dump_uri_instance(u, nodeid, secret, out, show_header=True):
     from allmydata import uri
@@ -491,114 +492,114 @@ def dump_uri_instance(u, nodeid, secret, out, show_header=True):
     if isinstance(u, uri.CHKFileURI):
         if show_header:
-            print >>out, "CHK File:"
+            print("CHK File:", file=out)
-        print >>out, " key:", base32.b2a(u.key)
-        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
-        print >>out, " size:", u.size
-        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
+        print(" key:", base32.b2a(u.key), file=out)
+        print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
+        print(" size:", u.size, file=out)
+        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
         _dump_secrets(u.get_storage_index(), secret, nodeid, out)
     elif isinstance(u, uri.CHKFileVerifierURI):
         if show_header:
-            print >>out, "CHK Verifier URI:"
+            print("CHK Verifier URI:", file=out)
-        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
-        print >>out, " size:", u.size
-        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
+        print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
+        print(" size:", u.size, file=out)
+        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)

     elif isinstance(u, uri.LiteralFileURI):
         if show_header:
-            print >>out, "Literal File URI:"
+            print("Literal File URI:", file=out)
-        print >>out, " data:", quote_output(u.data)
+        print(" data:", quote_output(u.data), file=out)

     elif isinstance(u, uri.WriteableSSKFileURI): # SDMF
         if show_header:
-            print >>out, "SDMF Writeable URI:"
+            print("SDMF Writeable URI:", file=out)
-        print >>out, " writekey:", base32.b2a(u.writekey)
-        print >>out, " readkey:", base32.b2a(u.readkey)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
-        print >>out
+        print(" writekey:", base32.b2a(u.writekey), file=out)
+        print(" readkey:", base32.b2a(u.readkey), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
+        print(file=out)
         if nodeid:
             we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
-            print >>out, " write_enabler:", base32.b2a(we)
-            print >>out
+            print(" write_enabler:", base32.b2a(we), file=out)
+            print(file=out)
         _dump_secrets(u.get_storage_index(), secret, nodeid, out)
     elif isinstance(u, uri.ReadonlySSKFileURI):
         if show_header:
-            print >>out, "SDMF Read-only URI:"
+            print("SDMF Read-only URI:", file=out)
-        print >>out, " readkey:", base32.b2a(u.readkey)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+        print(" readkey:", base32.b2a(u.readkey), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
     elif isinstance(u, uri.SSKVerifierURI):
         if show_header:
-            print >>out, "SDMF Verifier URI:"
+            print("SDMF Verifier URI:", file=out)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)

     elif isinstance(u, uri.WriteableMDMFFileURI): # MDMF
         if show_header:
-            print >>out, "MDMF Writeable URI:"
+            print("MDMF Writeable URI:", file=out)
-        print >>out, " writekey:", base32.b2a(u.writekey)
-        print >>out, " readkey:", base32.b2a(u.readkey)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
-        print >>out
+        print(" writekey:", base32.b2a(u.writekey), file=out)
+        print(" readkey:", base32.b2a(u.readkey), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
+        print(file=out)
         if nodeid:
             we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
-            print >>out, " write_enabler:", base32.b2a(we)
-            print >>out
+            print(" write_enabler:", base32.b2a(we), file=out)
+            print(file=out)
         _dump_secrets(u.get_storage_index(), secret, nodeid, out)
     elif isinstance(u, uri.ReadonlyMDMFFileURI):
         if show_header:
-            print >>out, "MDMF Read-only URI:"
+            print("MDMF Read-only URI:", file=out)
-        print >>out, " readkey:", base32.b2a(u.readkey)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+        print(" readkey:", base32.b2a(u.readkey), file=out)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
     elif isinstance(u, uri.MDMFVerifierURI):
         if show_header:
-            print >>out, "MDMF Verifier URI:"
+            print("MDMF Verifier URI:", file=out)
-        print >>out, " storage index:", si_b2a(u.get_storage_index())
-        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+        print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+        print(" fingerprint:", base32.b2a(u.fingerprint), file=out)

     elif isinstance(u, uri.ImmutableDirectoryURI): # CHK-based directory
         if show_header:
-            print >>out, "CHK Directory URI:"
+            print("CHK Directory URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
     elif isinstance(u, uri.ImmutableDirectoryURIVerifier):
         if show_header:
-            print >>out, "CHK Directory Verifier URI:"
+            print("CHK Directory Verifier URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

     elif isinstance(u, uri.DirectoryURI): # SDMF-based directory
         if show_header:
-            print >>out, "Directory Writeable URI:"
+            print("Directory Writeable URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
     elif isinstance(u, uri.ReadonlyDirectoryURI):
         if show_header:
-            print >>out, "Directory Read-only URI:"
+            print("Directory Read-only URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
     elif isinstance(u, uri.DirectoryURIVerifier):
         if show_header:
-            print >>out, "Directory Verifier URI:"
+            print("Directory Verifier URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

     elif isinstance(u, uri.MDMFDirectoryURI): # MDMF-based directory
         if show_header:
-            print >>out, "Directory Writeable URI:"
+            print("Directory Writeable URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
     elif isinstance(u, uri.ReadonlyMDMFDirectoryURI):
         if show_header:
-            print >>out, "Directory Read-only URI:"
+            print("Directory Read-only URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
     elif isinstance(u, uri.MDMFDirectoryURIVerifier):
         if show_header:
-            print >>out, "Directory Verifier URI:"
+            print("Directory Verifier URI:", file=out)
         dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

     else:
-        print >>out, "unknown cap type"
+        print("unknown cap type", file=out)

 class FindSharesOptions(BaseOptions):
     def getSynopsis(self):
@@ -642,7 +643,7 @@ def find_shares(options):
         d = os.path.join(d, "storage", "shares", sharedir)
         if os.path.exists(d):
             for shnum in listdir_unicode(d):
-                print >>out, quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False)
+                print(quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False), file=out)

     return 0
@@ -735,10 +736,10 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
              pubkey, signature, share_hash_chain, block_hash_tree,
              share_data, enc_privkey) = pieces

-            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
-                  (si_s, k, N, datalen,
-                   seqnum, base32.b2a(root_hash),
-                   expiration, quote_output(abs_sharefile))
+            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
+                  (si_s, k, N, datalen,
+                   seqnum, base32.b2a(root_hash),
+                   expiration, quote_output(abs_sharefile)), file=out)
         elif share_type == "MDMF":
             from allmydata.mutable.layout import MDMFSlotReadProxy
             fake_shnum = 0
@@ -764,12 +765,12 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
             verinfo = extract(p.get_verinfo)
             (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
              offsets) = verinfo
-            print >>out, "MDMF %s %d/%d %d #%d:%s %d %s" % \
-                  (si_s, k, N, datalen,
-                   seqnum, base32.b2a(root_hash),
-                   expiration, quote_output(abs_sharefile))
+            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
+                  (si_s, k, N, datalen,
+                   seqnum, base32.b2a(root_hash),
+                   expiration, quote_output(abs_sharefile)), file=out)
         else:
-            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
+            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

     elif struct.unpack(">L", prefix[:4]) == (1,):
         # immutable
@@ -799,12 +800,12 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
         filesize = unpacked["size"]
         ueb_hash = unpacked["UEB_hash"]

-        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
-                                                   ueb_hash, expiration,
-                                                   quote_output(abs_sharefile))
+        print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
+                                            ueb_hash, expiration,
+                                            quote_output(abs_sharefile)), file=out)
     else:
-        print >>out, "UNKNOWN really-unknown %s" % quote_output(abs_sharefile)
+        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)

     f.close()
@@ -835,7 +836,7 @@ def catalog_shares(options):
                 si_dir = os.path.join(abbrevdir, si_s)
                 catalog_shares_one_abbrevdir(si_s, si_dir, now, out,err)
             except:
-                print >>err, "Error processing %s" % quote_output(abbrevdir)
+                print("Error processing %s" % quote_output(abbrevdir), file=err)
                 failure.Failure().printTraceback(err)

     return 0
@@ -857,10 +858,10 @@ def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
                 describe_share(abs_sharefile, si_s, shnum_s, now,
                                out)
             except:
-                print >>err, "Error processing %s" % quote_output(abs_sharefile)
+                print("Error processing %s" % quote_output(abs_sharefile), file=err)
                 failure.Failure().printTraceback(err)
     except:
-        print >>err, "Error processing %s" % quote_output(si_dir)
+        print("Error processing %s" % quote_output(si_dir), file=err)
         failure.Failure().printTraceback(err)

 class CorruptShareOptions(BaseOptions):
@@ -900,7 +901,7 @@ def corrupt_share(options):
     def flip_bit(start, end):
         offset = random.randrange(start, end)
         bit = random.randrange(0, 8)
-        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
+        print("[%d..%d): %d.b%d" % (start, end, offset, bit), file=out)
         f = open(fn, "rb+")
         f.seek(offset)
         d = f.read(1)
@@ -945,7 +946,7 @@ class ReplOptions(BaseOptions):
         return "Usage: tahoe debug repl (OBSOLETE)"

 def repl(options):
-    print >>options.stderr, "'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv."
+    print("'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv.", file=options.stderr)
     return 1
@@ -956,7 +957,7 @@ class TrialOptions(BaseOptions):
         return "Usage: tahoe debug trial (OBSOLETE)"

 def trial(config):
-    print >>config.stderr, "'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv."
+    print("'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv.", file=config.stderr)
     return 1

 def fixOptionsClass( (subcmd, shortcut, OptionsClass, desc) ):
@@ -994,7 +995,7 @@ subcommand.
         return t

     def opt_help(self):
-        print str(self)
+        print(str(self))
         sys.exit(0)

 def flogtool(config):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os import os
import urllib import urllib
@ -84,7 +85,7 @@ def create(options):
maybe_upgrade_magic_folders(options["node-directory"]) maybe_upgrade_magic_folders(options["node-directory"])
folders = load_magic_folders(options["node-directory"]) folders = load_magic_folders(options["node-directory"])
if options['name'] in folders: if options['name'] in folders:
print >>options.stderr, "Already have a magic-folder named '{}'".format(options['name']) print("Already have a magic-folder named '{}'".format(options['name']), file=options.stderr)
return 1 return 1
# create an alias; this basically just remembers the cap for the # create an alias; this basically just remembers the cap for the
@ -95,23 +96,23 @@ def create(options):
rc = tahoe_add_alias.create_alias(create_alias_options) rc = tahoe_add_alias.create_alias(create_alias_options)
if rc != 0: if rc != 0:
print >>options.stderr, create_alias_options.stderr.getvalue() print(create_alias_options.stderr.getvalue(), file=options.stderr)
return rc return rc
print >>options.stdout, create_alias_options.stdout.getvalue() print(create_alias_options.stdout.getvalue(), file=options.stdout)
if options.nickname is not None: if options.nickname is not None:
print >>options.stdout, u"Inviting myself as client '{}':".format(options.nickname) print(u"Inviting myself as client '{}':".format(options.nickname), file=options.stdout)
invite_options = _delegate_options(options, InviteOptions()) invite_options = _delegate_options(options, InviteOptions())
invite_options.alias = options.alias invite_options.alias = options.alias
invite_options.nickname = options.nickname invite_options.nickname = options.nickname
invite_options['name'] = options['name'] invite_options['name'] = options['name']
rc = invite(invite_options) rc = invite(invite_options)
if rc != 0: if rc != 0:
print >>options.stderr, u"magic-folder: failed to invite after create\n" print(u"magic-folder: failed to invite after create\n", file=options.stderr)
print >>options.stderr, invite_options.stderr.getvalue() print(invite_options.stderr.getvalue(), file=options.stderr)
return rc return rc
invite_code = invite_options.stdout.getvalue().strip() invite_code = invite_options.stdout.getvalue().strip()
print >>options.stdout, u" created invite code" print(u" created invite code", file=options.stdout)
join_options = _delegate_options(options, JoinOptions()) join_options = _delegate_options(options, JoinOptions())
join_options['poll-interval'] = options['poll-interval'] join_options['poll-interval'] = options['poll-interval']
join_options.nickname = options.nickname join_options.nickname = options.nickname
@ -119,15 +120,15 @@ def create(options):
join_options.invite_code = invite_code join_options.invite_code = invite_code
rc = join(join_options) rc = join(join_options)
if rc != 0: if rc != 0:
print >>options.stderr, u"magic-folder: failed to join after create\n" print(u"magic-folder: failed to join after create\n", file=options.stderr)
print >>options.stderr, join_options.stderr.getvalue() print(join_options.stderr.getvalue(), file=options.stderr)
return rc return rc
print >>options.stdout, u" joined new magic-folder" print(u" joined new magic-folder", file=options.stdout)
print >>options.stdout, ( print(
u"Successfully created magic-folder '{}' with alias '{}:' " u"Successfully created magic-folder '{}' with alias '{}:' "
u"and client '{}'\nYou must re-start your node before the " u"and client '{}'\nYou must re-start your node before the "
u"magic-folder will be active." u"magic-folder will be active."
).format(options['name'], options.alias, options.nickname) .format(options['name'], options.alias, options.nickname), file=options.stdout)
return 0 return 0
@ -158,7 +159,7 @@ def _list_json(options, folders):
info[name] = { info[name] = {
u"directory": details["directory"], u"directory": details["directory"],
} }
print >>options.stdout, json.dumps(info) print(json.dumps(info), file=options.stdout)
return 0 return 0
@ -167,13 +168,13 @@ def _list_human(options, folders):
List our magic-folders for a human user List our magic-folders for a human user
""" """
if folders: if folders:
print >>options.stdout, "This client has the following magic-folders:" print("This client has the following magic-folders:", file=options.stdout)
biggest = max([len(nm) for nm in folders.keys()]) biggest = max([len(nm) for nm in folders.keys()])
fmt = " {:>%d}: {}" % (biggest, ) fmt = " {:>%d}: {}" % (biggest, )
for name, details in folders.items(): for name, details in folders.items():
print >>options.stdout, fmt.format(name, details["directory"]) print(fmt.format(name, details["directory"]), file=options.stdout)
else: else:
print >>options.stdout, "No magic-folders" print("No magic-folders", file=options.stdout)
class InviteOptions(BasedirOptions): class InviteOptions(BasedirOptions):
@ -212,14 +213,14 @@ def invite(options):
rc = tahoe_mkdir.mkdir(mkdir_options) rc = tahoe_mkdir.mkdir(mkdir_options)
if rc != 0: if rc != 0:
print >>options.stderr, "magic-folder: failed to mkdir\n" print("magic-folder: failed to mkdir\n", file=options.stderr)
return rc return rc
# FIXME this assumes caps are ASCII. # FIXME this assumes caps are ASCII.
dmd_write_cap = mkdir_options.stdout.getvalue().strip() dmd_write_cap = mkdir_options.stdout.getvalue().strip()
dmd_readonly_cap = uri.from_string(dmd_write_cap).get_readonly().to_string() dmd_readonly_cap = uri.from_string(dmd_write_cap).get_readonly().to_string()
if dmd_readonly_cap is None: if dmd_readonly_cap is None:
print >>options.stderr, "magic-folder: failed to diminish dmd write cap\n" print("magic-folder: failed to diminish dmd write cap\n", file=options.stderr)
return 1 return 1
magic_write_cap = get_aliases(options["node-directory"])[options.alias] magic_write_cap = get_aliases(options["node-directory"])[options.alias]
@ -231,12 +232,12 @@ def invite(options):
ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap, 'utf-8'), options.nickname) ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap, 'utf-8'), options.nickname)
rc = tahoe_mv.mv(ln_options, mode="link") rc = tahoe_mv.mv(ln_options, mode="link")
if rc != 0: if rc != 0:
print >>options.stderr, "magic-folder: failed to create link\n" print("magic-folder: failed to create link\n", file=options.stderr)
print >>options.stderr, ln_options.stderr.getvalue() print(ln_options.stderr.getvalue(), file=options.stderr)
return rc return rc
# FIXME: this assumes caps are ASCII. # FIXME: this assumes caps are ASCII.
print >>options.stdout, "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap) print("%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap), file=options.stdout)
return 0 return 0
class JoinOptions(BasedirOptions): class JoinOptions(BasedirOptions):
@ -272,7 +273,7 @@ def join(options):
existing_folders = load_magic_folders(options["node-directory"]) existing_folders = load_magic_folders(options["node-directory"])
if options['name'] in existing_folders: if options['name'] in existing_folders:
print >>options.stderr, "This client already has a magic-folder named '{}'".format(options['name']) print("This client already has a magic-folder named '{}'".format(options['name']), file=options.stderr)
return 1 return 1
db_fname = os.path.join( db_fname = os.path.join(
@ -281,7 +282,7 @@ def join(options):
u"magicfolder_{}.sqlite".format(options['name']), u"magicfolder_{}.sqlite".format(options['name']),
) )
if os.path.exists(db_fname): if os.path.exists(db_fname):
print >>options.stderr, "Database '{}' already exists; not overwriting".format(db_fname) print("Database '{}' already exists; not overwriting".format(db_fname), file=options.stderr)
return 1 return 1
folder = { folder = {
@ -309,11 +310,11 @@ def leave(options):
existing_folders = load_magic_folders(options["node-directory"]) existing_folders = load_magic_folders(options["node-directory"])
if not existing_folders: if not existing_folders:
print >>options.stderr, "No magic-folders at all" print("No magic-folders at all", file=options.stderr)
return 1 return 1
if options["name"] not in existing_folders: if options["name"] not in existing_folders:
print >>options.stderr, "No such magic-folder '{}'".format(options["name"]) print("No such magic-folder '{}'".format(options["name"]), file=options.stderr)
return 1 return 1
privdir = os.path.join(options["node-directory"], u"private") privdir = os.path.join(options["node-directory"], u"private")
@ -327,8 +328,8 @@ def leave(options):
try: try:
fileutil.remove(db_fname) fileutil.remove(db_fname)
except Exception as e: except Exception as e:
print >>options.stderr, ("Warning: unable to remove %s due to %s: %s" print("Warning: unable to remove %s due to %s: %s"
% (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e))) % (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)), file=options.stderr)
# if this was the last magic-folder, disable them entirely # if this was the last magic-folder, disable them entirely
if not existing_folders: if not existing_folders:
@ -422,7 +423,7 @@ def _print_item_status(item, now, longest):
prog = '%s %s' % (verb, abbreviate_time(now - when)) prog = '%s %s' % (verb, abbreviate_time(now - when))
break break
print " %s: %s" % (paddedname, prog) print(" %s: %s" % (paddedname, prog))
def status(options): def status(options):
@ -433,7 +434,7 @@ def status(options):
with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f: with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f:
token = f.read() token = f.read()
print >>stdout, "Magic-folder status for '{}':".format(options["name"]) print("Magic-folder status for '{}':".format(options["name"]), file=stdout)
if options["name"] not in magic_folders: if options["name"] not in magic_folders:
raise Exception( raise Exception(
@ -458,23 +459,23 @@ def status(options):
) )
) )
except Exception as e: except Exception as e:
print >>stderr, "failed to retrieve data: %s" % str(e) print("failed to retrieve data: %s" % str(e), file=stderr)
return 2 return 2
for d in [dmd_data, remote_data, magic_data]: for d in [dmd_data, remote_data, magic_data]:
if isinstance(d, dict) and 'error' in d: if isinstance(d, dict) and 'error' in d:
print >>stderr, "Error from server: %s" % d['error'] print("Error from server: %s" % d['error'], file=stderr)
print >>stderr, "This means we can't retrieve the remote shared directory." print("This means we can't retrieve the remote shared directory.", file=stderr)
return 3 return 3
captype, dmd = dmd_data captype, dmd = dmd_data
if captype != 'dirnode': if captype != 'dirnode':
print >>stderr, "magic_folder_dircap isn't a directory capability" print("magic_folder_dircap isn't a directory capability", file=stderr)
return 2 return 2
now = datetime.now() now = datetime.now()
print >>stdout, "Local files:" print("Local files:", file=stdout)
for (name, child) in dmd['children'].items(): for (name, child) in dmd['children'].items():
captype, meta = child captype, meta = child
status = 'good' status = 'good'
@ -484,28 +485,28 @@ def status(options):
nice_size = abbreviate_space(size) nice_size = abbreviate_space(size)
nice_created = abbreviate_time(now - created) nice_created = abbreviate_time(now - created)
if captype != 'filenode': if captype != 'filenode':
print >>stdout, "%20s: error, should be a filecap" % name print("%20s: error, should be a filecap" % name, file=stdout)
continue continue
print >>stdout, " %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, nice_created) print(" %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, nice_created), file=stdout)
print >>stdout print(file=stdout)
print >>stdout, "Remote files:" print("Remote files:", file=stdout)
captype, collective = remote_data captype, collective = remote_data
for (name, data) in collective['children'].items(): for (name, data) in collective['children'].items():
if data[0] != 'dirnode': if data[0] != 'dirnode':
print >>stdout, "Error: '%s': expected a dirnode, not '%s'" % (name, data[0]) print("Error: '%s': expected a dirnode, not '%s'" % (name, data[0]), file=stdout)
print >>stdout, " %s's remote:" % name print(" %s's remote:" % name, file=stdout)
dmd = _get_json_for_cap(options, data[1]['ro_uri']) dmd = _get_json_for_cap(options, data[1]['ro_uri'])
if isinstance(dmd, dict) and 'error' in dmd: if isinstance(dmd, dict) and 'error' in dmd:
print >>stdout, " Error: could not retrieve directory" print(" Error: could not retrieve directory", file=stdout)
continue continue
if dmd[0] != 'dirnode': if dmd[0] != 'dirnode':
print >>stdout, "Error: should be a dirnode" print("Error: should be a dirnode", file=stdout)
continue continue
for (n, d) in dmd[1]['children'].items(): for (n, d) in dmd[1]['children'].items():
if d[0] != 'filenode': if d[0] != 'filenode':
print >>stdout, "Error: expected '%s' to be a filenode." % (n,) print("Error: expected '%s' to be a filenode." % (n,), file=stdout)
meta = d[1] meta = d[1]
status = 'good' status = 'good'
@ -514,7 +515,7 @@ def status(options):
version = meta['metadata']['version'] version = meta['metadata']['version']
nice_size = abbreviate_space(size) nice_size = abbreviate_space(size)
nice_created = abbreviate_time(now - created) nice_created = abbreviate_time(now - created)
print >>stdout, " %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created) print(" %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created), file=stdout)
if len(magic_data): if len(magic_data):
uploads = [item for item in magic_data if item['kind'] == 'upload'] uploads = [item for item in magic_data if item['kind'] == 'upload']
@ -526,20 +527,20 @@ def status(options):
downloads = [item for item in downloads if item['status'] != 'success'] downloads = [item for item in downloads if item['status'] != 'success']
if len(uploads): if len(uploads):
print print()
print >>stdout, "Uploads:" print("Uploads:", file=stdout)
for item in uploads: for item in uploads:
_print_item_status(item, now, longest) _print_item_status(item, now, longest)
if len(downloads): if len(downloads):
print print()
print >>stdout, "Downloads:" print("Downloads:", file=stdout)
for item in downloads: for item in downloads:
_print_item_status(item, now, longest) _print_item_status(item, now, longest)
for item in magic_data: for item in magic_data:
if item['status'] == 'failure': if item['status'] == 'failure':
print >>stdout, "Failed:", item print("Failed:", item, file=stdout)
return 0 return 0
@ -595,7 +596,7 @@ def do_magic_folder(options):
try: try:
return f(so) return f(so)
except Exception as e: except Exception as e:
print >>options.stderr, "Error: %s" % (e,) print("Error: %s" % (e,), file=options.stderr)
if options['debug']: if options['debug']:
raise raise
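The whole hunk above follows one mechanical rule: `print >>fh, args` becomes `print(args, file=fh)`, and a bare `print` becomes `print()`. A minimal sketch of the mapping, runnable under either interpreter once `print_function` is imported (the message and dict values are made up):

```python
from __future__ import print_function
import sys

stderr = sys.stderr
msg = "example error"  # illustrative value

# Python 2 statement form:   print >>stderr, "failed: %s" % msg
# Function form, same output:
print("failed: %s" % msg, file=stderr)

# Bare "print" (a blank line to sys.stdout) becomes an empty call:
print()

# Comma-separated values keep their space-separated rendering:
print("Failed:", {"kind": "upload"}, file=sys.stdout)
```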


@ -1,3 +1,4 @@
from __future__ import print_function
import os, sys import os, sys
from six.moves import StringIO from six.moves import StringIO
@ -86,12 +87,12 @@ class Options(usage.Options):
def opt_version(self): def opt_version(self):
import allmydata import allmydata
print >>self.stdout, allmydata.get_package_versions_string(debug=True) print(allmydata.get_package_versions_string(debug=True), file=self.stdout)
self.no_command_needed = True self.no_command_needed = True
def opt_version_and_path(self): def opt_version_and_path(self):
import allmydata import allmydata
print >>self.stdout, allmydata.get_package_versions_string(show_paths=True, debug=True) print(allmydata.get_package_versions_string(show_paths=True, debug=True), file=self.stdout)
self.no_command_needed = True self.no_command_needed = True
opt_eliot_destination = opt_eliot_destination opt_eliot_destination = opt_eliot_destination
@ -133,12 +134,12 @@ def parse_or_exit_with_explanation(argv, stdout=sys.stdout):
c = config c = config
while hasattr(c, 'subOptions'): while hasattr(c, 'subOptions'):
c = c.subOptions c = c.subOptions
print >>stdout, str(c) print(str(c), file=stdout)
try: try:
msg = e.args[0].decode(get_io_encoding()) msg = e.args[0].decode(get_io_encoding())
except Exception: except Exception:
msg = repr(e) msg = repr(e)
print >>stdout, "%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False)) print("%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False)), file=stdout)
sys.exit(1) sys.exit(1)
return config return config
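The `parse_or_exit_with_explanation` hunk prints the deepest subcommand's help text and then the failure message before exiting. A condensed sketch of that error path, assuming a trivial stand-in `DemoOptions` class with no subcommands (so the descent through `subOptions` is omitted):

```python
from __future__ import print_function
import sys
from twisted.python import usage

class DemoOptions(usage.Options):
    optFlags = [("verbose", "v", "Say more.")]

def parse_or_exit(argv, stdout=sys.stdout):
    config = DemoOptions()
    try:
        config.parseOptions(argv)
    except usage.UsageError as e:
        print(str(config), file=stdout)  # str() of an Options is its help text
        print("%s: %s\n" % (sys.argv[0], e), file=stdout)
        sys.exit(1)
    return config

config = parse_or_exit(["--verbose"])
```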


@ -1,3 +1,4 @@
from __future__ import print_function
import os, time import os, time
from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \
@ -33,7 +34,7 @@ class SlowOperationRunner:
url = self.make_url(url, ophandle) url = self.make_url(url, ophandle)
resp = do_http("POST", url) resp = do_http("POST", url)
if resp.status not in (200, 302): if resp.status not in (200, 302):
print >>stderr, format_http_error("ERROR", resp) print(format_http_error("ERROR", resp), file=stderr)
return 1 return 1
# now we poll for results. We nominally poll at t=1, 5, 10, 30, 60, # now we poll for results. We nominally poll at t=1, 5, 10, 30, 60,
# 90, k*120 seconds, but if the poll takes non-zero time, that will # 90, k*120 seconds, but if the poll takes non-zero time, that will
@ -66,7 +67,7 @@ class SlowOperationRunner:
stderr = self.options.stderr stderr = self.options.stderr
resp = do_http("GET", url) resp = do_http("GET", url)
if resp.status != 200: if resp.status != 200:
print >>stderr, format_http_error("ERROR", resp) print(format_http_error("ERROR", resp), file=stderr)
return True return True
jdata = resp.read() jdata = resp.read()
data = json.loads(jdata) data = json.loads(jdata)
@ -74,9 +75,9 @@ class SlowOperationRunner:
return False return False
if self.options.get("raw"): if self.options.get("raw"):
if is_printable_ascii(jdata): if is_printable_ascii(jdata):
print >>stdout, jdata print(jdata, file=stdout)
else: else:
print >>stderr, "The JSON response contained unprintable characters:\n%s" % quote_output(jdata) print("The JSON response contained unprintable characters:\n%s" % quote_output(jdata), file=stderr)
return True return True
self.write_results(data) self.write_results(data)
return True return True
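The comment in the first hunk describes the nominal back-off schedule: 1, 5, 10, 30, 60, 90 seconds, then every 120. A self-contained sketch of that schedule as a generator (this helper is illustrative; it is not how the module actually tracks elapsed time):

```python
import itertools

def poll_times():
    """Yield nominal poll offsets: 1, 5, 10, 30, 60, 90, then k*120."""
    for t in (1, 5, 10, 30, 60, 90):
        yield t
    k = 1
    while True:
        yield k * 120
        k += 1

print(list(itertools.islice(poll_times(), 9)))
# [1, 5, 10, 30, 60, 90, 120, 240, 360]
```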


@ -1,3 +1,5 @@
from __future__ import print_function
import os import os
from twisted.python import usage from twisted.python import usage
from allmydata.scripts.common import NoDefaultBasedirOptions from allmydata.scripts.common import NoDefaultBasedirOptions
@ -64,9 +66,9 @@ def create_stats_gatherer(config):
if os.path.exists(basedir): if os.path.exists(basedir):
if listdir_unicode(basedir): if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_output(basedir) print("The base directory %s is not empty." % quote_output(basedir), file=err)
print >>err, "To avoid clobbering anything, I am going to quit now." print("To avoid clobbering anything, I am going to quit now.", file=err)
print >>err, "Please use a different directory, or empty this one." print("Please use a different directory, or empty this one.", file=err)
return -1 return -1
# we're willing to use an empty directory # we're willing to use an empty directory
else: else:
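The guard above refuses to reuse a non-empty base directory so it never clobbers existing state. A standalone sketch of the same check, using plain `os.listdir` where the original uses `listdir_unicode` and `quote_output`:

```python
from __future__ import print_function
import os, sys

def require_empty_or_absent(basedir, err=sys.stderr):
    if os.path.exists(basedir) and os.listdir(basedir):
        print("The base directory %s is not empty." % (basedir,), file=err)
        print("To avoid clobbering anything, I am going to quit now.", file=err)
        print("Please use a different directory, or empty this one.", file=err)
        return False
    return True  # absent or empty: safe to (create and) use
```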


@ -1,3 +1,4 @@
from __future__ import print_function
import json import json
import os import os
@ -196,7 +197,7 @@ class StatsGatherer(Referenceable, service.MultiService):
def remote_provide(self, provider, nickname): def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider) tubid = self.get_tubid(provider)
if tubid == '<unauth>': if tubid == '<unauth>':
print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname) print("WARNING: failed to get tubid for %s (%s)" % (provider, nickname))
# don't add to clients to poll (polluting data) don't care about disconnect # don't add to clients to poll (polluting data) don't care about disconnect
return return
self.clients[tubid] = provider self.clients[tubid] = provider
@ -229,15 +230,15 @@ class StdOutStatsGatherer(StatsGatherer):
def remote_provide(self, provider, nickname): def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider) tubid = self.get_tubid(provider)
if self.verbose: if self.verbose:
print 'connect "%s" [%s]' % (nickname, tubid) print('connect "%s" [%s]' % (nickname, tubid))
provider.notifyOnDisconnect(self.announce_lost_client, tubid) provider.notifyOnDisconnect(self.announce_lost_client, tubid)
StatsGatherer.remote_provide(self, provider, nickname) StatsGatherer.remote_provide(self, provider, nickname)
def announce_lost_client(self, tubid): def announce_lost_client(self, tubid):
print 'disconnect "%s" [%s]' % (self.nicknames[tubid], tubid) print('disconnect "%s" [%s]' % (self.nicknames[tubid], tubid))
def got_stats(self, stats, tubid, nickname): def got_stats(self, stats, tubid, nickname):
print '"%s" [%s]:' % (nickname, tubid) print('"%s" [%s]:' % (nickname, tubid))
pprint.pprint(stats) pprint.pprint(stats)
class JSONStatsGatherer(StdOutStatsGatherer): class JSONStatsGatherer(StdOutStatsGatherer):
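`StdOutStatsGatherer` reports in two shapes: one-line connect/disconnect notices, and a header plus a pretty-printed stats dict. The output can be reproduced standalone; the tubid, nickname, and stats values below are made up:

```python
from __future__ import print_function
import pprint

def got_stats(stats, tubid, nickname):
    # Same shape as StdOutStatsGatherer.got_stats above.
    print('"%s" [%s]:' % (nickname, tubid))
    pprint.pprint(stats)

print('connect "%s" [%s]' % ("storage-1", "w2rzri7c"))
got_stats({"cpu_monitor.total": 12.5, "node.uptime": 3600.0},
          "w2rzri7c", "storage-1")
print('disconnect "%s" [%s]' % ("storage-1", "w2rzri7c"))
```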


@ -1,3 +1,5 @@
from __future__ import print_function
""" """
Futz with files like a pro. Futz with files like a pro.
""" """
@ -567,7 +569,7 @@ if sys.platform == "win32":
abspath = abspath[4 :] abspath = abspath[4 :]
drive = os.path.splitdrive(abspath)[0] drive = os.path.splitdrive(abspath)[0]
print "flushing %r" % (drive,) print("flushing %r" % (drive,))
hVolume = CreateFileW(u"\\\\.\\" + drive, hVolume = CreateFileW(u"\\\\.\\" + drive,
GENERIC_WRITE, GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE,
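The truncated call above opens a `\\.\C:`-style volume handle in order to flush its write cache. A condensed, Windows-only sketch of that pattern via ctypes; the constants are standard Win32 SDK values, but the helper itself and its error handling are my own, not the module's:

```python
from __future__ import print_function
import ctypes, sys

GENERIC_WRITE = 0x40000000
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
OPEN_EXISTING = 3

def flush_volume(drive, out=sys.stdout):
    """Flush a volume's write cache, e.g. flush_volume(u"C:").
    Windows-only: ctypes.windll does not exist elsewhere."""
    print("flushing %r" % (drive,), file=out)
    k32 = ctypes.windll.kernel32
    k32.CreateFileW.restype = ctypes.c_void_p  # keep 64-bit handles intact
    hVolume = k32.CreateFileW(u"\\\\.\\" + drive, GENERIC_WRITE,
                              FILE_SHARE_READ | FILE_SHARE_WRITE,
                              None, OPEN_EXISTING, 0, None)
    if hVolume in (None, ctypes.c_void_p(-1).value):  # INVALID_HANDLE_VALUE
        raise ctypes.WinError()
    try:
        if not k32.FlushFileBuffers(ctypes.c_void_p(hVolume)):
            raise ctypes.WinError()
    finally:
        k32.CloseHandle(ctypes.c_void_p(hVolume))
```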


@ -1,3 +1,4 @@
from __future__ import print_function
import time import time
from twisted.internet import task from twisted.internet import task
@ -45,6 +46,6 @@ class PollMixin:
if not e.check(*self._poll_should_ignore_these_errors): if not e.check(*self._poll_should_ignore_these_errors):
errs.append(e) errs.append(e)
if errs: if errs:
print errs print(errs)
self.fail("Errors snooped, terminating early") self.fail("Errors snooped, terminating early")
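The snooping above keeps only failures whose exception type is not in the ignore list, and aborts the test run if any remain. A runnable sketch of that filter using Twisted's `Failure.check` (the ValueError is manufactured for the demo):

```python
from __future__ import print_function
from twisted.python.failure import Failure

def unexpected_failures(failures, ignore=()):
    """Keep only failures whose type is not in the ignore tuple."""
    return [f for f in failures if not f.check(*ignore)]

try:
    raise ValueError("boom")
except ValueError:
    f = Failure()  # captures the active exception

errs = unexpected_failures([f], ignore=(KeyError,))
if errs:
    print(errs)  # ValueError is not ignored, so it is reported
```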


@ -1,3 +1,5 @@
from __future__ import print_function
class Spans: class Spans:
"""I represent a compressed list of booleans, one per index (an integer). """I represent a compressed list of booleans, one per index (an integer).
@ -40,7 +42,7 @@ class Spans:
assert start > prev_end assert start > prev_end
prev_end = start+length prev_end = start+length
except AssertionError: except AssertionError:
print "BAD:", self.dump() print("BAD:", self.dump())
raise raise
def add(self, start, length): def add(self, start, length):
@ -265,7 +267,7 @@ class DataSpans:
for start, data in self.spans[1:]: for start, data in self.spans[1:]:
if not start > prev_end: if not start > prev_end:
# adjacent or overlapping: bad # adjacent or overlapping: bad
print "ASSERTION FAILED", self.spans print("ASSERTION FAILED", self.spans)
raise AssertionError raise AssertionError
def get(self, start, length): def get(self, start, length):
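Both assertions above enforce the same invariant: spans are kept sorted and strictly separated, so adjacent or overlapping runs must already have been merged. A minimal standalone checker over `(start, length)` pairs:

```python
def check_spans(spans):
    """Raise if any span is empty, out of order, or touches its neighbor."""
    prev_end = -1
    for (start, length) in spans:
        assert length > 0
        assert start > prev_end  # strict: even adjacency is forbidden
        prev_end = start + length

check_spans([(0, 3), (5, 2), (10, 1)])  # ok: gaps at 3-4 and 7-9
# check_spans([(0, 3), (3, 2)])         # AssertionError: adjacent spans
```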


@ -5,7 +5,7 @@
# either the GNU General Public License, version 2 or later, or under the # either the GNU General Public License, version 2 or later, or under the
# Transitive Grace Period Public License, version 1 or later. # Transitive Grace Period Public License, version 1 or later.
from __future__ import division from __future__ import division, print_function
from allmydata.util.mathutil import round_sigfigs from allmydata.util.mathutil import round_sigfigs
import math import math
import sys import sys
@ -97,7 +97,7 @@ def print_pmf(pmf, n=4, out=sys.stdout):
significant digits. significant digits.
""" """
for k, p in enumerate(pmf): for k, p in enumerate(pmf):
print >>out, "i=" + str(k) + ":", round_sigfigs(p, n) print("i=" + str(k) + ":", round_sigfigs(p, n), file=out)
def pr_backup_file_loss(p_list, backup_p, k): def pr_backup_file_loss(p_list, backup_p, k):
""" """


@ -1,3 +1,4 @@
from __future__ import print_function
done = False done = False
@ -33,7 +34,7 @@ def initialize():
# So be paranoid about catching errors and reporting them to original_stderr, # So be paranoid about catching errors and reporting them to original_stderr,
# so that we can at least see them. # so that we can at least see them.
def _complain(message): def _complain(message):
print >>original_stderr, isinstance(message, str) and message or repr(message) print(isinstance(message, str) and message or repr(message), file=original_stderr)
log.msg(message, level=log.WEIRD) log.msg(message, level=log.WEIRD)
# Work around <http://bugs.python.org/issue6058>. # Work around <http://bugs.python.org/issue6058>.


@ -2,6 +2,8 @@
# Windows near-equivalent to twisted.internet.inotify # Windows near-equivalent to twisted.internet.inotify
# This should only be imported on Windows. # This should only be imported on Windows.
from __future__ import print_function
import os, sys import os, sys
from eliot import ( from eliot import (
@ -193,12 +195,12 @@ def simple_test():
hDirectory = _open_directory(path_u) hDirectory = _open_directory(path_u)
fni = FileNotifyInformation() fni = FileNotifyInformation()
print "Waiting..." print("Waiting...")
while True: while True:
fni.read_changes(hDirectory, recursive, filter) fni.read_changes(hDirectory, recursive, filter)
print repr(fni.data) print(repr(fni.data))
for info in fni: for info in fni:
print info print(info)
def medium_test(): def medium_test():
from twisted.python.filepath import FilePath from twisted.python.filepath import FilePath