replaced all remaining instances of the print statement with the print function #582

Merged
heartsucker merged 1 commit from 3010-remaining-print-functions into master 2019-03-28 02:54:50 +00:00
20 changed files with 292 additions and 267 deletions
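For reference, this is the conversion pattern applied across the branch: each touched module gains "from __future__ import print_function", and every Python 2 print statement becomes a call to the print function, with the ">>stream" redirection turning into the file= keyword argument. A minimal sketch of the pattern (hypothetical values, not lifted from any single file in the diff):

from __future__ import print_function  # makes print a function on Python 2

import sys

out = sys.stdout

# Python 2 statement forms removed by this PR:
#   print >>out, "servermap:"   # redirect output to a stream
#   print "hello", "world"      # plain statement
#   print                       # bare statement: emit a newline
# Python 3 function forms that replace them:
print("servermap:", file=out)    # `>>out` becomes file=out
print("hello", "world")          # same space-separated output
print()                          # bare newline
print("oops", file=sys.stderr)   # works for any writable stream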

newsfragments/3010.other (new file, +1)
View File

@ -0,0 +1 @@
Replaced all remaining instances of the print statement with the print function.
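The __future__ import must be the first statement in each module (after any docstring or comments), which is why every file in this diff adds it above the existing imports. A short sketch of why it is required, assuming a Python 2 interpreter (behavior notes in the comments):

from __future__ import print_function

import sys

# Without the future import, Python 2 parses print as a statement:
#   print("a", "b")              -> prints the tuple ('a', 'b')
#   print("x", file=sys.stderr)  -> SyntaxError (keyword arg in a tuple display)
# With it, both calls behave exactly as on Python 3:
print("a", "b")                  # a b
print("x", file=sys.stderr)      # writes "x\n" to stderr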

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import sys, time, copy
from zope.interface import implementer
@ -181,19 +182,19 @@ class ServerMap:
return (self._last_update_mode, self._last_update_time)
def dump(self, out=sys.stdout):
print >>out, "servermap:"
print("servermap:", file=out)
for ( (server, shnum), (verinfo, timestamp) ) in self._known_shares.items():
(seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
offsets_tuple) = verinfo
print >>out, ("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
print("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
(server.get_name(), shnum,
seqnum, base32.b2a(root_hash)[:4], k, N,
datalength))
datalength), file=out)
if self._problems:
print >>out, "%d PROBLEMS" % len(self._problems)
print("%d PROBLEMS" % len(self._problems), file=out)
for f in self._problems:
print >>out, str(f)
print(str(f), file=out)
return out
def all_servers(self):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
from twisted.python import usage
from allmydata.scripts.common import BaseOptions
@ -16,8 +17,8 @@ def print_keypair(options):
from allmydata.util.keyutil import make_keypair
out = options.stdout
privkey_vs, pubkey_vs = make_keypair()
print >>out, "private:", privkey_vs
print >>out, "public:", pubkey_vs
print("private:", privkey_vs, file=out)
print("public:", pubkey_vs, file=out)
class DerivePubkeyOptions(BaseOptions):
def parseArgs(self, privkey):
@ -40,8 +41,8 @@ def derive_pubkey(options):
from allmydata.util import keyutil
privkey_vs = options.privkey
sk, pubkey_vs = keyutil.parse_privkey(privkey_vs)
print >>out, "private:", privkey_vs
print >>out, "public:", pubkey_vs
print("private:", privkey_vs, file=out)
print("public:", pubkey_vs, file=out)
return 0
class AdminCommand(BaseOptions):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os.path, sys, time, random, stat
@ -73,7 +74,7 @@ def get_backupdb(dbfile, stderr=sys.stderr,
just_create=just_create, dbname="backupdb")
return BackupDB_v2(sqlite3, db)
except DBError, e:
print >>stderr, e
print(e, file=stderr)
return None

View File

@ -1,3 +1,5 @@
from __future__ import print_function
import os.path, re, fnmatch
from twisted.python import usage
from allmydata.scripts.common import get_aliases, get_default_nodedir, \
@ -512,8 +514,8 @@ def get(options):
# enough to have picked an empty file
pass
else:
print >>options.stderr, "%s retrieved and written to %s" % \
(options.from_file, options.to_file)
print("%s retrieved and written to %s" % \
(options.from_file, options.to_file), file=options.stderr)
return rc
def put(options):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os, sys, urllib, textwrap
import codecs
@ -168,7 +169,7 @@ class TahoeError(Exception):
self.msg = msg
def display(self, err):
print >>err, self.msg
print(self.msg, file=err)
class UnknownAliasError(TahoeError):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os
from cStringIO import StringIO
@ -90,7 +91,7 @@ def format_http_error(msg, resp):
def check_http_error(resp, stderr):
if resp.status < 200 or resp.status >= 300:
print >>stderr, format_http_error("Error during HTTP request", resp)
print(format_http_error("Error during HTTP request", resp), file=stderr)
return 1

View File

@ -1,3 +1,5 @@
from __future__ import print_function
import os
import json
@ -339,9 +341,9 @@ def write_client_config(c, config):
@defer.inlineCallbacks
def _get_config_via_wormhole(config):
out = config.stdout
print >>out, "Opening wormhole with code '{}'".format(config['join'])
print("Opening wormhole with code '{}'".format(config['join']), file=out)
relay_url = config.parent['wormhole-server']
print >>out, "Connecting to '{}'".format(relay_url)
print("Connecting to '{}'".format(relay_url), file=out)
wh = wormhole.create(
appid=config.parent['wormhole-invite-appid'],
@ -351,7 +353,7 @@ def _get_config_via_wormhole(config):
code = unicode(config['join'])
wh.set_code(code)
yield wh.get_welcome()
print >>out, "Connected to wormhole server"
print("Connected to wormhole server", file=out)
intro = {
u"abilities": {
@ -363,14 +365,14 @@ def _get_config_via_wormhole(config):
server_intro = yield wh.get_message()
server_intro = json.loads(server_intro)
print >>out, " received server introduction"
print(" received server introduction", file=out)
if u'abilities' not in server_intro:
raise RuntimeError(" Expected 'abilities' in server introduction")
if u'server-v1' not in server_intro['abilities']:
raise RuntimeError(" Expected 'server-v1' in server abilities")
remote_data = yield wh.get_message()
print >>out, " received configuration"
print(" received configuration", file=out)
defer.returnValue(json.loads(remote_data))
@ -384,9 +386,9 @@ def create_node(config):
if os.path.exists(basedir):
if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
print >>err, "To avoid clobbering anything, I am going to quit now."
print >>err, "Please use a different directory, or empty this one."
print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err)
print("To avoid clobbering anything, I am going to quit now.", file=err)
print("Please use a different directory, or empty this one.", file=err)
defer.returnValue(-1)
# we're willing to use an empty directory
else:
@ -398,7 +400,7 @@ def create_node(config):
try:
remote_config = yield _get_config_via_wormhole(config)
except RuntimeError as e:
print >>err, str(e)
print(str(e), file=err)
defer.returnValue(1)
# configuration we'll allow the inviter to set
@ -408,8 +410,8 @@ def create_node(config):
]
sensitive_keys = ['introducer']
print >>out, "Encoding: {shares-needed} of {shares-total} shares, on at least {shares-happy} servers".format(**remote_config)
print >>out, "Overriding the following config:"
print("Encoding: {shares-needed} of {shares-total} shares, on at least {shares-happy} servers".format(**remote_config), file=out)
print("Overriding the following config:", file=out)
for k in whitelist:
v = remote_config.get(k, None)
@ -420,22 +422,22 @@ def create_node(config):
config[k] = v
if k not in sensitive_keys:
if k not in ['shares-happy', 'shares-total', 'shares-needed']:
print >>out, " {}: {}".format(k, v)
print(" {}: {}".format(k, v), file=out)
else:
print >>out, " {}: [sensitive data; see tahoe.cfg]".format(k)
print(" {}: [sensitive data; see tahoe.cfg]".format(k), file=out)
fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
yield write_node_config(c, config)
write_client_config(c, config)
print >>out, "Node created in %s" % quote_local_unicode_path(basedir)
print("Node created in %s" % quote_local_unicode_path(basedir), file=out)
tahoe_cfg = quote_local_unicode_path(os.path.join(basedir, "tahoe.cfg"))
if not config.get("introducer", ""):
print >>out, " Please set [client]introducer.furl= in %s!" % tahoe_cfg
print >>out, " The node cannot connect to a grid without it."
print(" Please set [client]introducer.furl= in %s!" % tahoe_cfg, file=out)
print(" The node cannot connect to a grid without it.", file=out)
if not config.get("nickname", ""):
print >>out, " Please set [node]nickname= in %s" % tahoe_cfg
print(" Please set [node]nickname= in %s" % tahoe_cfg, file=out)
defer.returnValue(0)
def create_client(config):
@ -454,9 +456,9 @@ def create_introducer(config):
if os.path.exists(basedir):
if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
print >>err, "To avoid clobbering anything, I am going to quit now."
print >>err, "Please use a different directory, or empty this one."
print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err)
print("To avoid clobbering anything, I am going to quit now.", file=err)
print("Please use a different directory, or empty this one.", file=err)
defer.returnValue(-1)
# we're willing to use an empty directory
else:
@ -467,7 +469,7 @@ def create_introducer(config):
with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
yield write_node_config(c, config)
print >>out, "Introducer created in %s" % quote_local_unicode_path(basedir)
print("Introducer created in %s" % quote_local_unicode_path(basedir), file=out)
defer.returnValue(0)

View File

@ -1,3 +1,4 @@
from __future__ import print_function
# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
@ -37,7 +38,7 @@ def dump_share(options):
out = options.stdout
# check the version, to see if we have a mutable or immutable share
print >>out, "share filename: %s" % quote_output(options['filename'])
print("share filename: %s" % quote_output(options['filename']), file=out)
f = open(options['filename'], "rb")
prefix = f.read(32)
@ -55,7 +56,7 @@ def dump_immutable_share(options):
if not options["leases-only"]:
dump_immutable_chk_share(f, out, options)
dump_immutable_lease_info(f, out)
print >>out
print(file=out)
return 0
def dump_immutable_chk_share(f, out, options):
@ -67,7 +68,7 @@ def dump_immutable_chk_share(f, out, options):
# use a ReadBucketProxy to parse the bucket and find the uri extension
bp = ReadBucketProxy(None, None, '')
offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
print >>out, "%20s: %d" % ("version", bp._version)
print("%20s: %d" % ("version", bp._version), file=out)
seek = offsets['uri_extension']
length = struct.unpack(bp._fieldstruct,
f.read_share_data(seek, bp._fieldsize))[0]
@ -85,24 +86,24 @@ def dump_immutable_chk_share(f, out, options):
for k in keys1:
if k in unpacked:
dk = display_keys.get(k, k)
print >>out, "%20s: %s" % (dk, unpacked[k])
print >>out
print("%20s: %s" % (dk, unpacked[k]), file=out)
print(file=out)
for k in keys2:
if k in unpacked:
dk = display_keys.get(k, k)
print >>out, "%20s: %s" % (dk, unpacked[k])
print >>out
print("%20s: %s" % (dk, unpacked[k]), file=out)
print(file=out)
for k in keys3:
if k in unpacked:
dk = display_keys.get(k, k)
print >>out, "%20s: %s" % (dk, unpacked[k])
print("%20s: %s" % (dk, unpacked[k]), file=out)
leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
if leftover:
print >>out
print >>out, "LEFTOVER:"
print(file=out)
print("LEFTOVER:", file=out)
for k in sorted(leftover):
print >>out, "%20s: %s" % (k, unpacked[k])
print("%20s: %s" % (k, unpacked[k]), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
@ -116,7 +117,7 @@ def dump_immutable_chk_share(f, out, options):
unpacked["needed_shares"],
unpacked["total_shares"], unpacked["size"])
verify_cap = u.to_string()
print >>out, "%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False))
print("%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False)), file=out)
sizes = {}
sizes['data'] = (offsets['plaintext_hash_tree'] -
@ -124,33 +125,33 @@ def dump_immutable_chk_share(f, out, options):
sizes['validation'] = (offsets['uri_extension'] -
offsets['plaintext_hash_tree'])
sizes['uri-extension'] = len(UEB_data)
print >>out
print >>out, " Size of data within the share:"
print(file=out)
print(" Size of data within the share:", file=out)
for k in sorted(sizes):
print >>out, "%20s: %s" % (k, sizes[k])
print("%20s: %s" % (k, sizes[k]), file=out)
if options['offsets']:
print >>out
print >>out, " Section Offsets:"
print >>out, "%20s: %s" % ("share data", f._data_offset)
print(file=out)
print(" Section Offsets:", file=out)
print("%20s: %s" % ("share data", f._data_offset), file=out)
for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
"block_hashes", "share_hashes", "uri_extension"]:
name = {"data": "block data"}.get(k,k)
offset = f._data_offset + offsets[k]
print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
print >>out, "%20s: %s" % ("leases", f._lease_offset)
print(" %20s: %s (0x%x)" % (name, offset, offset), file=out)
print("%20s: %s" % ("leases", f._lease_offset), file=out)
def dump_immutable_lease_info(f, out):
# display lease information too
print >>out
print(file=out)
leases = list(f.get_leases())
if leases:
for i,lease in enumerate(leases):
when = format_expiration_time(lease.expiration_time)
print >>out, " Lease #%d: owner=%d, expire in %s" \
% (i, lease.owner_num, when)
print(" Lease #%d: owner=%d, expire in %s" \
% (i, lease.owner_num, when), file=out)
else:
print >>out, " No leases."
print(" No leases.", file=out)
def format_expiration_time(expiration_time):
now = time.time()
@ -186,27 +187,27 @@ def dump_mutable_share(options):
share_type = "MDMF"
f.close()
print >>out
print >>out, "Mutable slot found:"
print >>out, " share_type: %s" % share_type
print >>out, " write_enabler: %s" % base32.b2a(WE)
print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
print >>out, " num_extra_leases: %d" % num_extra_leases
print >>out, " container_size: %d" % container_size
print >>out, " data_length: %d" % data_length
print(file=out)
print("Mutable slot found:", file=out)
print(" share_type: %s" % share_type, file=out)
print(" write_enabler: %s" % base32.b2a(WE), file=out)
print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
print(" num_extra_leases: %d" % num_extra_leases, file=out)
print(" container_size: %d" % container_size, file=out)
print(" data_length: %d" % data_length, file=out)
if leases:
for (leasenum, lease) in leases:
print >>out
print >>out, " Lease #%d:" % leasenum
print >>out, " ownerid: %d" % lease.owner_num
print(file=out)
print(" Lease #%d:" % leasenum, file=out)
print(" ownerid: %d" % lease.owner_num, file=out)
when = format_expiration_time(lease.expiration_time)
print >>out, " expires in %s" % when
print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
print(" expires in %s" % when, file=out)
print(" renew_secret: %s" % base32.b2a(lease.renew_secret), file=out)
print(" cancel_secret: %s" % base32.b2a(lease.cancel_secret), file=out)
print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out)
else:
print >>out, "No leases."
print >>out
print("No leases.", file=out)
print(file=out)
if share_type == "SDMF":
dump_SDMF_share(m, data_length, options)
@ -248,21 +249,21 @@ def dump_SDMF_share(m, length, options):
(ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
ig_datalen, offsets) = unpack_header(data)
print >>out, " SDMF contents:"
print >>out, " seqnum: %d" % seqnum
print >>out, " root_hash: %s" % base32.b2a(root_hash)
print >>out, " IV: %s" % base32.b2a(IV)
print >>out, " required_shares: %d" % k
print >>out, " total_shares: %d" % N
print >>out, " segsize: %d" % segsize
print >>out, " datalen: %d" % datalen
print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
print >>out, " pubkey: %d bytes" % len(pubkey)
print >>out, " signature: %d bytes" % len(signature)
print(" SDMF contents:", file=out)
print(" seqnum: %d" % seqnum, file=out)
print(" root_hash: %s" % base32.b2a(root_hash), file=out)
print(" IV: %s" % base32.b2a(IV), file=out)
print(" required_shares: %d" % k, file=out)
print(" total_shares: %d" % N, file=out)
print(" segsize: %d" % segsize, file=out)
print(" datalen: %d" % datalen, file=out)
print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
print(" pubkey: %d bytes" % len(pubkey), file=out)
print(" signature: %d bytes" % len(signature), file=out)
share_hash_ids = ",".join(sorted([str(hid)
for hid in share_hash_chain.keys()]))
print >>out, " share_hash_chain: %s" % share_hash_ids
print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
print(" share_hash_chain: %s" % share_hash_ids, file=out)
print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
@ -274,15 +275,15 @@ def dump_SDMF_share(m, length, options):
fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
u = SSKVerifierURI(storage_index, fingerprint)
verify_cap = u.to_string()
print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)
if options['offsets']:
# NOTE: this offset-calculation code is fragile, and needs to be
# merged with MutableShareFile's internals.
print >>out
print >>out, " Section Offsets:"
print(file=out)
print(" Section Offsets:", file=out)
def printoffset(name, value, shift=0):
print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
printoffset("first lease", m.HEADER_SIZE)
printoffset("share data", m.DATA_OFFSET)
o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
@ -300,7 +301,7 @@ def dump_SDMF_share(m, length, options):
printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
f.close()
print >>out
print(file=out)
def dump_MDMF_share(m, length, options):
from allmydata.mutable.layout import MDMFSlotReadProxy
@ -342,21 +343,21 @@ def dump_MDMF_share(m, length, options):
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
print >>out, " MDMF contents:"
print >>out, " seqnum: %d" % seqnum
print >>out, " root_hash: %s" % base32.b2a(root_hash)
print(" MDMF contents:", file=out)
print(" seqnum: %d" % seqnum, file=out)
print(" root_hash: %s" % base32.b2a(root_hash), file=out)
#print >>out, " IV: %s" % base32.b2a(IV)
print >>out, " required_shares: %d" % k
print >>out, " total_shares: %d" % N
print >>out, " segsize: %d" % segsize
print >>out, " datalen: %d" % datalen
print >>out, " enc_privkey: %d bytes" % len(encprivkey)
print >>out, " pubkey: %d bytes" % len(pubkey)
print >>out, " signature: %d bytes" % len(signature)
print(" required_shares: %d" % k, file=out)
print(" total_shares: %d" % N, file=out)
print(" segsize: %d" % segsize, file=out)
print(" datalen: %d" % datalen, file=out)
print(" enc_privkey: %d bytes" % len(encprivkey), file=out)
print(" pubkey: %d bytes" % len(pubkey), file=out)
print(" signature: %d bytes" % len(signature), file=out)
share_hash_ids = ",".join([str(hid)
for hid in sorted(share_hash_chain.keys())])
print >>out, " share_hash_chain: %s" % share_hash_ids
print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
print(" share_hash_chain: %s" % share_hash_ids, file=out)
print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
@ -368,16 +369,16 @@ def dump_MDMF_share(m, length, options):
fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
u = MDMFVerifierURI(storage_index, fingerprint)
verify_cap = u.to_string()
print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)
if options['offsets']:
# NOTE: this offset-calculation code is fragile, and needs to be
# merged with MutableShareFile's internals.
print >>out
print >>out, " Section Offsets:"
print(file=out)
print(" Section Offsets:", file=out)
def printoffset(name, value, shift=0):
print >>out, "%s%.20s: %s (0x%x)" % (" "*shift, name, value, value)
print("%s%.20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
printoffset("first lease", m.HEADER_SIZE, 2)
printoffset("share data", m.DATA_OFFSET, 2)
o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
@ -397,7 +398,7 @@ def dump_MDMF_share(m, length, options):
printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
f.close()
print >>out
print(file=out)
@ -460,7 +461,7 @@ def dump_cap(options):
u = uri.from_string(cap)
print >>out
print(file=out)
dump_uri_instance(u, nodeid, secret, out)
def _dump_secrets(storage_index, secret, nodeid, out):
@ -469,19 +470,19 @@ def _dump_secrets(storage_index, secret, nodeid, out):
if secret:
crs = hashutil.my_renewal_secret_hash(secret)
print >>out, " client renewal secret:", base32.b2a(crs)
print(" client renewal secret:", base32.b2a(crs), file=out)
frs = hashutil.file_renewal_secret_hash(crs, storage_index)
print >>out, " file renewal secret:", base32.b2a(frs)
print(" file renewal secret:", base32.b2a(frs), file=out)
if nodeid:
renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
print >>out, " lease renewal secret:", base32.b2a(renew)
print(" lease renewal secret:", base32.b2a(renew), file=out)
ccs = hashutil.my_cancel_secret_hash(secret)
print >>out, " client cancel secret:", base32.b2a(ccs)
print(" client cancel secret:", base32.b2a(ccs), file=out)
fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
print >>out, " file cancel secret:", base32.b2a(fcs)
print(" file cancel secret:", base32.b2a(fcs), file=out)
if nodeid:
cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
print >>out, " lease cancel secret:", base32.b2a(cancel)
print(" lease cancel secret:", base32.b2a(cancel), file=out)
def dump_uri_instance(u, nodeid, secret, out, show_header=True):
from allmydata import uri
@ -491,114 +492,114 @@ def dump_uri_instance(u, nodeid, secret, out, show_header=True):
if isinstance(u, uri.CHKFileURI):
if show_header:
print >>out, "CHK File:"
print >>out, " key:", base32.b2a(u.key)
print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
print >>out, " size:", u.size
print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print("CHK File:", file=out)
print(" key:", base32.b2a(u.key), file=out)
print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
print(" size:", u.size, file=out)
print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.CHKFileVerifierURI):
if show_header:
print >>out, "CHK Verifier URI:"
print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
print >>out, " size:", u.size
print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print("CHK Verifier URI:", file=out)
print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
print(" size:", u.size, file=out)
print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
elif isinstance(u, uri.LiteralFileURI):
if show_header:
print >>out, "Literal File URI:"
print >>out, " data:", quote_output(u.data)
print("Literal File URI:", file=out)
print(" data:", quote_output(u.data), file=out)
elif isinstance(u, uri.WriteableSSKFileURI): # SDMF
if show_header:
print >>out, "SDMF Writeable URI:"
print >>out, " writekey:", base32.b2a(u.writekey)
print >>out, " readkey:", base32.b2a(u.readkey)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print >>out
print("SDMF Writeable URI:", file=out)
print(" writekey:", base32.b2a(u.writekey), file=out)
print(" readkey:", base32.b2a(u.readkey), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
print(file=out)
if nodeid:
we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
print >>out, " write_enabler:", base32.b2a(we)
print >>out
print(" write_enabler:", base32.b2a(we), file=out)
print(file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.ReadonlySSKFileURI):
if show_header:
print >>out, "SDMF Read-only URI:"
print >>out, " readkey:", base32.b2a(u.readkey)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print("SDMF Read-only URI:", file=out)
print(" readkey:", base32.b2a(u.readkey), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.SSKVerifierURI):
if show_header:
print >>out, "SDMF Verifier URI:"
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print("SDMF Verifier URI:", file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.WriteableMDMFFileURI): # MDMF
if show_header:
print >>out, "MDMF Writeable URI:"
print >>out, " writekey:", base32.b2a(u.writekey)
print >>out, " readkey:", base32.b2a(u.readkey)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print >>out
print("MDMF Writeable URI:", file=out)
print(" writekey:", base32.b2a(u.writekey), file=out)
print(" readkey:", base32.b2a(u.readkey), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
print(file=out)
if nodeid:
we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
print >>out, " write_enabler:", base32.b2a(we)
print >>out
print(" write_enabler:", base32.b2a(we), file=out)
print(file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.ReadonlyMDMFFileURI):
if show_header:
print >>out, "MDMF Read-only URI:"
print >>out, " readkey:", base32.b2a(u.readkey)
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print("MDMF Read-only URI:", file=out)
print(" readkey:", base32.b2a(u.readkey), file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.MDMFVerifierURI):
if show_header:
print >>out, "MDMF Verifier URI:"
print >>out, " storage index:", si_b2a(u.get_storage_index())
print >>out, " fingerprint:", base32.b2a(u.fingerprint)
print("MDMF Verifier URI:", file=out)
print(" storage index:", si_b2a(u.get_storage_index()), file=out)
print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.ImmutableDirectoryURI): # CHK-based directory
if show_header:
print >>out, "CHK Directory URI:"
print("CHK Directory URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ImmutableDirectoryURIVerifier):
if show_header:
print >>out, "CHK Directory Verifier URI:"
print("CHK Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.DirectoryURI): # SDMF-based directory
if show_header:
print >>out, "Directory Writeable URI:"
print("Directory Writeable URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ReadonlyDirectoryURI):
if show_header:
print >>out, "Directory Read-only URI:"
print("Directory Read-only URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.DirectoryURIVerifier):
if show_header:
print >>out, "Directory Verifier URI:"
print("Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.MDMFDirectoryURI): # MDMF-based directory
if show_header:
print >>out, "Directory Writeable URI:"
print("Directory Writeable URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ReadonlyMDMFDirectoryURI):
if show_header:
print >>out, "Directory Read-only URI:"
print("Directory Read-only URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.MDMFDirectoryURIVerifier):
if show_header:
print >>out, "Directory Verifier URI:"
print("Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
else:
print >>out, "unknown cap type"
print("unknown cap type", file=out)
class FindSharesOptions(BaseOptions):
def getSynopsis(self):
@ -642,7 +643,7 @@ def find_shares(options):
d = os.path.join(d, "storage", "shares", sharedir)
if os.path.exists(d):
for shnum in listdir_unicode(d):
print >>out, quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False)
print(quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False), file=out)
return 0
@ -735,10 +736,10 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
pubkey, signature, share_hash_chain, block_hash_tree,
share_data, enc_privkey) = pieces
print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
print("SDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, base32.b2a(root_hash),
expiration, quote_output(abs_sharefile))
expiration, quote_output(abs_sharefile)), file=out)
elif share_type == "MDMF":
from allmydata.mutable.layout import MDMFSlotReadProxy
fake_shnum = 0
@ -764,12 +765,12 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
verinfo = extract(p.get_verinfo)
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
print >>out, "MDMF %s %d/%d %d #%d:%s %d %s" % \
print("MDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, base32.b2a(root_hash),
expiration, quote_output(abs_sharefile))
expiration, quote_output(abs_sharefile)), file=out)
else:
print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
elif struct.unpack(">L", prefix[:4]) == (1,):
# immutable
@ -799,12 +800,12 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
filesize = unpacked["size"]
ueb_hash = unpacked["UEB_hash"]
print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
ueb_hash, expiration,
quote_output(abs_sharefile))
quote_output(abs_sharefile)), file=out)
else:
print >>out, "UNKNOWN really-unknown %s" % quote_output(abs_sharefile)
print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)
f.close()
@ -835,7 +836,7 @@ def catalog_shares(options):
si_dir = os.path.join(abbrevdir, si_s)
catalog_shares_one_abbrevdir(si_s, si_dir, now, out,err)
except:
print >>err, "Error processing %s" % quote_output(abbrevdir)
print("Error processing %s" % quote_output(abbrevdir), file=err)
failure.Failure().printTraceback(err)
return 0
@ -857,10 +858,10 @@ def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
describe_share(abs_sharefile, si_s, shnum_s, now,
out)
except:
print >>err, "Error processing %s" % quote_output(abs_sharefile)
print("Error processing %s" % quote_output(abs_sharefile), file=err)
failure.Failure().printTraceback(err)
except:
print >>err, "Error processing %s" % quote_output(si_dir)
print("Error processing %s" % quote_output(si_dir), file=err)
failure.Failure().printTraceback(err)
class CorruptShareOptions(BaseOptions):
@ -900,7 +901,7 @@ def corrupt_share(options):
def flip_bit(start, end):
offset = random.randrange(start, end)
bit = random.randrange(0, 8)
print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
print("[%d..%d): %d.b%d" % (start, end, offset, bit), file=out)
f = open(fn, "rb+")
f.seek(offset)
d = f.read(1)
@ -945,7 +946,7 @@ class ReplOptions(BaseOptions):
return "Usage: tahoe debug repl (OBSOLETE)"
def repl(options):
print >>options.stderr, "'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv."
print("'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv.", file=options.stderr)
return 1
@ -956,7 +957,7 @@ class TrialOptions(BaseOptions):
return "Usage: tahoe debug trial (OBSOLETE)"
def trial(config):
print >>config.stderr, "'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv."
print("'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv.", file=config.stderr)
return 1
def fixOptionsClass( (subcmd, shortcut, OptionsClass, desc) ):
@ -994,7 +995,7 @@ subcommand.
return t
def opt_help(self):
print str(self)
print(str(self))
sys.exit(0)
def flogtool(config):

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os
import urllib
@ -84,7 +85,7 @@ def create(options):
maybe_upgrade_magic_folders(options["node-directory"])
folders = load_magic_folders(options["node-directory"])
if options['name'] in folders:
print >>options.stderr, "Already have a magic-folder named '{}'".format(options['name'])
print("Already have a magic-folder named '{}'".format(options['name']), file=options.stderr)
return 1
# create an alias; this basically just remembers the cap for the
@ -95,23 +96,23 @@ def create(options):
rc = tahoe_add_alias.create_alias(create_alias_options)
if rc != 0:
print >>options.stderr, create_alias_options.stderr.getvalue()
print(create_alias_options.stderr.getvalue(), file=options.stderr)
return rc
print >>options.stdout, create_alias_options.stdout.getvalue()
print(create_alias_options.stdout.getvalue(), file=options.stdout)
if options.nickname is not None:
print >>options.stdout, u"Inviting myself as client '{}':".format(options.nickname)
print(u"Inviting myself as client '{}':".format(options.nickname), file=options.stdout)
invite_options = _delegate_options(options, InviteOptions())
invite_options.alias = options.alias
invite_options.nickname = options.nickname
invite_options['name'] = options['name']
rc = invite(invite_options)
if rc != 0:
print >>options.stderr, u"magic-folder: failed to invite after create\n"
print >>options.stderr, invite_options.stderr.getvalue()
print(u"magic-folder: failed to invite after create\n", file=options.stderr)
print(invite_options.stderr.getvalue(), file=options.stderr)
return rc
invite_code = invite_options.stdout.getvalue().strip()
print >>options.stdout, u" created invite code"
print(u" created invite code", file=options.stdout)
join_options = _delegate_options(options, JoinOptions())
join_options['poll-interval'] = options['poll-interval']
join_options.nickname = options.nickname
@ -119,15 +120,15 @@ def create(options):
join_options.invite_code = invite_code
rc = join(join_options)
if rc != 0:
print >>options.stderr, u"magic-folder: failed to join after create\n"
print >>options.stderr, join_options.stderr.getvalue()
print(u"magic-folder: failed to join after create\n", file=options.stderr)
print(join_options.stderr.getvalue(), file=options.stderr)
return rc
print >>options.stdout, u" joined new magic-folder"
print >>options.stdout, (
print(u" joined new magic-folder", file=options.stdout)
print(
u"Successfully created magic-folder '{}' with alias '{}:' "
u"and client '{}'\nYou must re-start your node before the "
u"magic-folder will be active."
).format(options['name'], options.alias, options.nickname)
.format(options['name'], options.alias, options.nickname), file=options.stdout)
return 0
@ -158,7 +159,7 @@ def _list_json(options, folders):
info[name] = {
u"directory": details["directory"],
}
print >>options.stdout, json.dumps(info)
print(json.dumps(info), file=options.stdout)
return 0
@ -167,13 +168,13 @@ def _list_human(options, folders):
List our magic-folders for a human user
"""
if folders:
print >>options.stdout, "This client has the following magic-folders:"
print("This client has the following magic-folders:", file=options.stdout)
biggest = max([len(nm) for nm in folders.keys()])
fmt = " {:>%d}: {}" % (biggest, )
for name, details in folders.items():
print >>options.stdout, fmt.format(name, details["directory"])
print(fmt.format(name, details["directory"]), file=options.stdout)
else:
print >>options.stdout, "No magic-folders"
print("No magic-folders", file=options.stdout)
class InviteOptions(BasedirOptions):
@ -212,14 +213,14 @@ def invite(options):
rc = tahoe_mkdir.mkdir(mkdir_options)
if rc != 0:
print >>options.stderr, "magic-folder: failed to mkdir\n"
print("magic-folder: failed to mkdir\n", file=options.stderr)
return rc
# FIXME this assumes caps are ASCII.
dmd_write_cap = mkdir_options.stdout.getvalue().strip()
dmd_readonly_cap = uri.from_string(dmd_write_cap).get_readonly().to_string()
if dmd_readonly_cap is None:
print >>options.stderr, "magic-folder: failed to diminish dmd write cap\n"
print("magic-folder: failed to diminish dmd write cap\n", file=options.stderr)
return 1
magic_write_cap = get_aliases(options["node-directory"])[options.alias]
@ -231,12 +232,12 @@ def invite(options):
ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap, 'utf-8'), options.nickname)
rc = tahoe_mv.mv(ln_options, mode="link")
if rc != 0:
print >>options.stderr, "magic-folder: failed to create link\n"
print >>options.stderr, ln_options.stderr.getvalue()
print("magic-folder: failed to create link\n", file=options.stderr)
print(ln_options.stderr.getvalue(), file=options.stderr)
return rc
# FIXME: this assumes caps are ASCII.
print >>options.stdout, "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap)
print("%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap), file=options.stdout)
return 0
class JoinOptions(BasedirOptions):
@ -272,7 +273,7 @@ def join(options):
existing_folders = load_magic_folders(options["node-directory"])
if options['name'] in existing_folders:
print >>options.stderr, "This client already has a magic-folder named '{}'".format(options['name'])
print("This client already has a magic-folder named '{}'".format(options['name']), file=options.stderr)
return 1
db_fname = os.path.join(
@ -281,7 +282,7 @@ def join(options):
u"magicfolder_{}.sqlite".format(options['name']),
)
if os.path.exists(db_fname):
print >>options.stderr, "Database '{}' already exists; not overwriting".format(db_fname)
print("Database '{}' already exists; not overwriting".format(db_fname), file=options.stderr)
return 1
folder = {
@ -309,11 +310,11 @@ def leave(options):
existing_folders = load_magic_folders(options["node-directory"])
if not existing_folders:
print >>options.stderr, "No magic-folders at all"
print("No magic-folders at all", file=options.stderr)
return 1
if options["name"] not in existing_folders:
print >>options.stderr, "No such magic-folder '{}'".format(options["name"])
print("No such magic-folder '{}'".format(options["name"]), file=options.stderr)
return 1
privdir = os.path.join(options["node-directory"], u"private")
@ -327,8 +328,8 @@ def leave(options):
try:
fileutil.remove(db_fname)
except Exception as e:
print >>options.stderr, ("Warning: unable to remove %s due to %s: %s"
% (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)))
print("Warning: unable to remove %s due to %s: %s"
% (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)), file=options.stderr)
# if this was the last magic-folder, disable them entirely
if not existing_folders:
@ -422,7 +423,7 @@ def _print_item_status(item, now, longest):
prog = '%s %s' % (verb, abbreviate_time(now - when))
break
print " %s: %s" % (paddedname, prog)
print(" %s: %s" % (paddedname, prog))
def status(options):
@ -433,7 +434,7 @@ def status(options):
with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f:
token = f.read()
print >>stdout, "Magic-folder status for '{}':".format(options["name"])
print("Magic-folder status for '{}':".format(options["name"]), file=stdout)
if options["name"] not in magic_folders:
raise Exception(
@ -458,23 +459,23 @@ def status(options):
)
)
except Exception as e:
print >>stderr, "failed to retrieve data: %s" % str(e)
print("failed to retrieve data: %s" % str(e), file=stderr)
return 2
for d in [dmd_data, remote_data, magic_data]:
if isinstance(d, dict) and 'error' in d:
print >>stderr, "Error from server: %s" % d['error']
print >>stderr, "This means we can't retrieve the remote shared directory."
print("Error from server: %s" % d['error'], file=stderr)
print("This means we can't retrieve the remote shared directory.", file=stderr)
return 3
captype, dmd = dmd_data
if captype != 'dirnode':
print >>stderr, "magic_folder_dircap isn't a directory capability"
print("magic_folder_dircap isn't a directory capability", file=stderr)
return 2
now = datetime.now()
print >>stdout, "Local files:"
print("Local files:", file=stdout)
for (name, child) in dmd['children'].items():
captype, meta = child
status = 'good'
@ -484,28 +485,28 @@ def status(options):
nice_size = abbreviate_space(size)
nice_created = abbreviate_time(now - created)
if captype != 'filenode':
print >>stdout, "%20s: error, should be a filecap" % name
print("%20s: error, should be a filecap" % name, file=stdout)
continue
print >>stdout, " %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, nice_created)
print(" %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, nice_created), file=stdout)
print >>stdout
print >>stdout, "Remote files:"
print(file=stdout)
print("Remote files:", file=stdout)
captype, collective = remote_data
for (name, data) in collective['children'].items():
if data[0] != 'dirnode':
print >>stdout, "Error: '%s': expected a dirnode, not '%s'" % (name, data[0])
print >>stdout, " %s's remote:" % name
print("Error: '%s': expected a dirnode, not '%s'" % (name, data[0]), file=stdout)
print(" %s's remote:" % name, file=stdout)
dmd = _get_json_for_cap(options, data[1]['ro_uri'])
if isinstance(dmd, dict) and 'error' in dmd:
print >>stdout, " Error: could not retrieve directory"
print(" Error: could not retrieve directory", file=stdout)
continue
if dmd[0] != 'dirnode':
print >>stdout, "Error: should be a dirnode"
print("Error: should be a dirnode", file=stdout)
continue
for (n, d) in dmd[1]['children'].items():
if d[0] != 'filenode':
print >>stdout, "Error: expected '%s' to be a filenode." % (n,)
print("Error: expected '%s' to be a filenode." % (n,), file=stdout)
meta = d[1]
status = 'good'
@ -514,7 +515,7 @@ def status(options):
version = meta['metadata']['version']
nice_size = abbreviate_space(size)
nice_created = abbreviate_time(now - created)
print >>stdout, " %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created)
print(" %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created), file=stdout)
if len(magic_data):
uploads = [item for item in magic_data if item['kind'] == 'upload']
@ -526,20 +527,20 @@ def status(options):
downloads = [item for item in downloads if item['status'] != 'success']
if len(uploads):
print
print >>stdout, "Uploads:"
print()
print("Uploads:", file=stdout)
for item in uploads:
_print_item_status(item, now, longest)
if len(downloads):
print
print >>stdout, "Downloads:"
print()
print("Downloads:", file=stdout)
for item in downloads:
_print_item_status(item, now, longest)
for item in magic_data:
if item['status'] == 'failure':
print >>stdout, "Failed:", item
print("Failed:", item, file=stdout)
return 0
@ -595,7 +596,7 @@ def do_magic_folder(options):
try:
return f(so)
except Exception as e:
print >>options.stderr, "Error: %s" % (e,)
print("Error: %s" % (e,), file=options.stderr)
if options['debug']:
raise

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os, sys
from six.moves import StringIO
@ -86,12 +87,12 @@ class Options(usage.Options):
def opt_version(self):
import allmydata
print >>self.stdout, allmydata.get_package_versions_string(debug=True)
print(allmydata.get_package_versions_string(debug=True), file=self.stdout)
self.no_command_needed = True
def opt_version_and_path(self):
import allmydata
print >>self.stdout, allmydata.get_package_versions_string(show_paths=True, debug=True)
print(allmydata.get_package_versions_string(show_paths=True, debug=True), file=self.stdout)
self.no_command_needed = True
opt_eliot_destination = opt_eliot_destination
@ -133,12 +134,12 @@ def parse_or_exit_with_explanation(argv, stdout=sys.stdout):
c = config
while hasattr(c, 'subOptions'):
c = c.subOptions
print >>stdout, str(c)
print(str(c), file=stdout)
try:
msg = e.args[0].decode(get_io_encoding())
except Exception:
msg = repr(e)
print >>stdout, "%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False))
print("%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False)), file=stdout)
sys.exit(1)
return config

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os, time
from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \
@ -33,7 +34,7 @@ class SlowOperationRunner:
url = self.make_url(url, ophandle)
resp = do_http("POST", url)
if resp.status not in (200, 302):
print >>stderr, format_http_error("ERROR", resp)
print(format_http_error("ERROR", resp), file=stderr)
return 1
# now we poll for results. We nominally poll at t=1, 5, 10, 30, 60,
# 90, k*120 seconds, but if the poll takes non-zero time, that will
@ -66,7 +67,7 @@ class SlowOperationRunner:
stderr = self.options.stderr
resp = do_http("GET", url)
if resp.status != 200:
print >>stderr, format_http_error("ERROR", resp)
print(format_http_error("ERROR", resp), file=stderr)
return True
jdata = resp.read()
data = json.loads(jdata)
@ -74,9 +75,9 @@ class SlowOperationRunner:
return False
if self.options.get("raw"):
if is_printable_ascii(jdata):
print >>stdout, jdata
print(jdata, file=stdout)
else:
print >>stderr, "The JSON response contained unprintable characters:\n%s" % quote_output(jdata)
print("The JSON response contained unprintable characters:\n%s" % quote_output(jdata), file=stderr)
return True
self.write_results(data)
return True

View File

@ -1,3 +1,5 @@
from __future__ import print_function
import os
from twisted.python import usage
from allmydata.scripts.common import NoDefaultBasedirOptions
@ -64,9 +66,9 @@ def create_stats_gatherer(config):
if os.path.exists(basedir):
if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_output(basedir)
print >>err, "To avoid clobbering anything, I am going to quit now."
print >>err, "Please use a different directory, or empty this one."
print("The base directory %s is not empty." % quote_output(basedir), file=err)
print("To avoid clobbering anything, I am going to quit now.", file=err)
print("Please use a different directory, or empty this one.", file=err)
return -1
# we're willing to use an empty directory
else:

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import json
import os
@ -196,7 +197,7 @@ class StatsGatherer(Referenceable, service.MultiService):
def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider)
if tubid == '<unauth>':
print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname)
print("WARNING: failed to get tubid for %s (%s)" % (provider, nickname))
# don't add to clients to poll (polluting data) don't care about disconnect
return
self.clients[tubid] = provider
@ -229,15 +230,15 @@ class StdOutStatsGatherer(StatsGatherer):
def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider)
if self.verbose:
print 'connect "%s" [%s]' % (nickname, tubid)
print('connect "%s" [%s]' % (nickname, tubid))
provider.notifyOnDisconnect(self.announce_lost_client, tubid)
StatsGatherer.remote_provide(self, provider, nickname)
def announce_lost_client(self, tubid):
print 'disconnect "%s" [%s]' % (self.nicknames[tubid], tubid)
print('disconnect "%s" [%s]' % (self.nicknames[tubid], tubid))
def got_stats(self, stats, tubid, nickname):
print '"%s" [%s]:' % (nickname, tubid)
print('"%s" [%s]:' % (nickname, tubid))
pprint.pprint(stats)
class JSONStatsGatherer(StdOutStatsGatherer):

View File

@ -1,3 +1,5 @@
from __future__ import print_function
"""
Futz with files like a pro.
"""
@ -567,7 +569,7 @@ if sys.platform == "win32":
abspath = abspath[4 :]
drive = os.path.splitdrive(abspath)[0]
print "flushing %r" % (drive,)
print("flushing %r" % (drive,))
hVolume = CreateFileW(u"\\\\.\\" + drive,
GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE,

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import time
from twisted.internet import task
@ -45,6 +46,6 @@ class PollMixin:
if not e.check(*self._poll_should_ignore_these_errors):
errs.append(e)
if errs:
print errs
print(errs)
self.fail("Errors snooped, terminating early")

View File

@ -1,3 +1,5 @@
from __future__ import print_function
class Spans:
"""I represent a compressed list of booleans, one per index (an integer).
@ -40,7 +42,7 @@ class Spans:
assert start > prev_end
prev_end = start+length
except AssertionError:
print "BAD:", self.dump()
print("BAD:", self.dump())
raise
def add(self, start, length):
@ -265,7 +267,7 @@ class DataSpans:
for start, data in self.spans[1:]:
if not start > prev_end:
# adjacent or overlapping: bad
print "ASSERTION FAILED", self.spans
print("ASSERTION FAILED", self.spans)
raise AssertionError
def get(self, start, length):

View File

@ -5,7 +5,7 @@
# either the GNU General Public License, version 2 or later, or under the
# Transitive Grace Period Public License, version 1 or later.
from __future__ import division
from __future__ import division, print_function
from allmydata.util.mathutil import round_sigfigs
import math
import sys
@ -97,7 +97,7 @@ def print_pmf(pmf, n=4, out=sys.stdout):
significant digits.
"""
for k, p in enumerate(pmf):
print >>out, "i=" + str(k) + ":", round_sigfigs(p, n)
print("i=" + str(k) + ":", round_sigfigs(p, n), file=out)
def pr_backup_file_loss(p_list, backup_p, k):
"""

View File

@ -1,3 +1,4 @@
from __future__ import print_function
done = False
@ -33,7 +34,7 @@ def initialize():
# So be paranoid about catching errors and reporting them to original_stderr,
# so that we can at least see them.
def _complain(message):
print >>original_stderr, isinstance(message, str) and message or repr(message)
print(isinstance(message, str) and message or repr(message), file=original_stderr)
log.msg(message, level=log.WEIRD)
# Work around <http://bugs.python.org/issue6058>.

View File

@ -2,6 +2,8 @@
# Windows near-equivalent to twisted.internet.inotify
# This should only be imported on Windows.
from __future__ import print_function
import os, sys
from eliot import (
@ -193,12 +195,12 @@ def simple_test():
hDirectory = _open_directory(path_u)
fni = FileNotifyInformation()
print "Waiting..."
print("Waiting...")
while True:
fni.read_changes(hDirectory, recursive, filter)
print repr(fni.data)
print(repr(fni.data))
for info in fni:
print info
print(info)
def medium_test():
from twisted.python.filepath import FilePath