setup: organize misc/ scripts and tools and remove obsolete ones
This is for ticket #1068.
parent 6476752467
commit 1fc6be28f4

Makefile (30 lines changed)
@@ -119,28 +119,22 @@ test-coverage: build src/allmydata/_version.py
	$(PYTHON) setup.py trial --reporter=bwverbose-coverage -s $(TEST)

quicktest:
-	$(PYTHON) misc/run-with-pythonpath.py trial $(TRIALARGS) $(TEST)
+	$(PYTHON) misc/build_helpers/run-with-pythonpath.py trial $(TRIALARGS) $(TEST)

# code-coverage: install the "coverage" package from PyPI, do "make
# quicktest-coverage" to do a unit test run with coverage-gathering enabled,
# then use "make coverage-output-text" for a brief report, or "make
# coverage-output" for a pretty HTML report. Also see "make .coverage.el" and
-# misc/coverage.el for emacs integration.
+# misc/coding_helpers/coverage.el for emacs integration.

quicktest-coverage:
	rm -f .coverage
-	$(PYTHON) misc/run-with-pythonpath.py trial --reporter=bwverbose-coverage $(TEST)
+	$(PYTHON) misc/build_helpers/run-with-pythonpath.py trial --reporter=bwverbose-coverage $(TEST)
# on my laptop, "quicktest" takes 239s, "quicktest-coverage" takes 304s

COVERAGE_OMIT = --omit /System,/Library,/usr/lib,src/allmydata/test,support

# this is like 'coverage report', but includes lines-uncovered
coverage-output-text:
	$(PYTHON) misc/coverage2text.py

coverage-output:
	rm -rf coverage-html
-	coverage html -d coverage-html $(COVERAGE_OMIT)
+	coverage html -d coverage-html
	cp .coverage coverage-html/coverage.data
	@echo "now point your browser at coverage-html/index.html"
@@ -160,7 +154,7 @@ coverage-output:
.PHONY: repl test-darcs-boringfile test-clean clean find-trailing-spaces

.coverage.el: .coverage
-	$(PYTHON) misc/coverage2el.py
+	$(PYTHON) misc/coding_helpers/coverage2el.py

# 'upload-coverage' is meant to be run with an UPLOAD_TARGET=host:/dir setting
ifdef UPLOAD_TARGET
@@ -244,7 +238,7 @@ repl:

test-darcs-boringfile:
	$(MAKE)
-	$(PYTHON) misc/test-darcs-boringfile.py
+	$(PYTHON) misc/build_helpers/test-darcs-boringfile.py

test-clean:
	find . |grep -vEe "_darcs|allfiles.tmp|src/allmydata/_(version|auto_deps|appname).py" |sort >allfiles.tmp.old
@@ -265,7 +259,7 @@ clean:
	rm -f bin/tahoe bin/tahoe-script.py

find-trailing-spaces:
-	$(PYTHON) misc/find-trailing-spaces.py -r src
+	$(PYTHON) misc/coding_tools/find-trailing-spaces.py -r src

# The test-desert-island target grabs the tahoe-deps tarball, unpacks it,
# does a build, then asserts that the build did not try to download anything
@@ -280,7 +274,7 @@ fetch-and-unpack-deps:
test-desert-island:
	$(MAKE) fetch-and-unpack-deps
	$(MAKE) 2>&1 | tee make.out
-	$(PYTHON) misc/check-build.py make.out no-downloads
+	$(PYTHON) misc/build_helpers/check-build.py make.out no-downloads


# TARBALL GENERATION
@@ -297,7 +291,7 @@ upload-tarballs:

# DEBIAN PACKAGING

-VER=$(shell $(PYTHON) misc/get-version.py)
+VER=$(shell $(PYTHON) misc/build_helpers/get-version.py)
DEBCOMMENTS="'make deb' build"

show-version:
@@ -309,7 +303,7 @@ show-pp:
.PHONY: deb-etch deb-lenny deb-sid
.PHONY: deb-edgy deb-feisty deb-gutsy deb-hardy deb-intrepid deb-jaunty

-# we use misc/$TAHOE_ARCH/debian
+# we use misc/debian_helpers/$TAHOE_ARCH/debian

deb-etch: # py2.4
	$(MAKE) deb-ARCH ARCH=etch TAHOE_ARCH=etch
@@ -352,7 +346,7 @@ endif

setup-deb: is-known-debian-arch
	rm -f debian
-	ln -s misc/$(TAHOE_ARCH)/debian debian
+	ln -s misc/debian_helpers/$(TAHOE_ARCH)/debian debian
	chmod +x debian/rules

# etch (current debian stable) has python-simplejson-1.3, which doesn't
@@ -419,7 +413,7 @@ deb-jaunty-head:
# new experimental debian-packaging-building target
.PHONY: EXPERIMENTAL-deb
EXPERIMENTAL-deb: is-known-debian-arch
-	$(PYTHON) misc/build-deb.py $(ARCH)
+	$(PYTHON) misc/build_helpers/build-deb.py $(ARCH)


# These targets provide for windows native builds

@@ -198,7 +198,7 @@ but a few notes are worth stating here:

* assign each severe (log.WEIRD or higher) event a unique message
  identifier, as the umid= argument to the log.msg() call. The
-  misc/make_umid script may be useful for this purpose. This will make it
+  misc/coding_tools/make_umid script may be useful for this purpose. This will make it
  easier to write a classification function for these messages.

* use the parent= argument whenever the event is causally/temporally
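As a rough sketch of the umid workflow described in the bullet above (not part of this commit's diff): a make_umid-style helper just emits a short random alphanumeric tag, which is then pasted into the log.msg() call as its umid= argument. The 6-character length and base64-derived alphabet here are assumptions; the real misc/coding_tools/make_umid may differ. Python 2, to match the rest of this tree.

    # Hypothetical sketch of a make_umid-style helper; the length and
    # alphabet are assumptions, not the actual script's choices.
    import os, base64

    def make_umid():
        # urlsafe base64 of random bytes, stripped of its two
        # non-alphanumeric characters and padding, truncated to 6 chars
        s = base64.urlsafe_b64encode(os.urandom(16))
        return s.replace("-", "").replace("_", "").replace("=", "")[:6]

    if __name__ == "__main__":
        # paste the output into a call like:
        #   log.msg("disconnected during upload", level=log.WEIRD, umid=...)
        print make_umid()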
@@ -254,7 +254,7 @@ Other tools can be built to examine these stats and render them into
something useful. For example, a tool could sum the
"storage_server.disk_avail" values from all servers to compute a
total-disk-available number for the entire grid (however, the "disk watcher"
-daemon, in misc/spacetime/, is better suited for this specific task).
+daemon, in misc/operations_helpers/spacetime/, is better suited for this specific task).

== Using Munin To Graph Stats Values ==
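A minimal sketch of the disk_avail summation mentioned above (not part of this commit's diff), assuming the per-server stats have already been gathered into plain dicts; the container shape is an assumption, not the stats-gatherer's actual output format. Python 2, to match the rest of this tree.

    # Hypothetical sketch: sum "storage_server.disk_avail" across servers.
    def total_disk_avail(stats_by_server):
        total = 0
        for stats in stats_by_server.values():
            # nodes that are not storage servers report no disk_avail
            total += stats.get("storage_server.disk_avail", 0)
        return total

    stats_by_server = {
        "server-1": {"storage_server.disk_avail": 7 * 10**9},
        "server-2": {"storage_server.disk_avail": 12 * 10**9},
        "server-3": {},  # a client-only node
    }
    print total_disk_avail(stats_by_server)  # prints 19000000000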
@@ -21,6 +21,7 @@ diskimage:
ifdef UPLOAD_DEST_FURLFILE
# N.B.: xfer-client.py requires foolscap, so we hack up PYTHONPATH to improve
# the chances of finding it (using the one that we just built)
+# broken. To fix this just use flappclient. --Zooko 2010-06-06
upload:
	chmod a+r Allmydata-$(VERSION).dmg
	PYTHONPATH=$(PYTHONPATH):../support/lib/python2.5/site-packages \
@@ -93,3 +93,4 @@ print_setuptools_ver()
print_py_pkg_ver('coverage')
print_py_pkg_ver('trialcoverage')
print_py_pkg_ver('setuptools_trial')
+print_py_pkg_ver('pyflakes')
@@ -1,116 +0,0 @@
-import sys
-from coverage import coverage
-from coverage.results import Numbers
-from coverage.summary import SummaryReporter
-from twisted.python import usage
-
-# this is an adaptation of the code behind "coverage report", modified to
-# display+sortby "lines uncovered", which (IMHO) is more important of a
-# metric than lines covered or percentage covered. Concentrating on the files
-# with the most uncovered lines encourages getting the tree and test suite
-# into a state that provides full line-coverage on all files.
-
-# much of this code was adapted from coverage/summary.py in the 'coverage'
-# distribution, and is used under their BSD license.
-
-class Options(usage.Options):
-    optParameters = [
-        ("sortby", "s", "uncovered", "how to sort: uncovered, covered, name"),
-        ]
-
-class MyReporter(SummaryReporter):
-    def report(self, outfile=None, sortby="uncovered"):
-        self.find_code_units(None, ["/System", "/Library", "/usr/lib",
-                                    "support/lib", "src/allmydata/test"])
-
-        # Prepare the formatting strings
-        max_name = max([len(cu.name) for cu in self.code_units] + [5])
-        fmt_name = "%%- %ds  " % max_name
-        fmt_err = "%s   %s: %s\n"
-        header1 = (fmt_name % "" ) + "     Statements    "
-        header2 = (fmt_name % "Name") + " Uncovered  Covered"
-        fmt_coverage = fmt_name + "%9d %7d "
-        if self.branches:
-            header1 += "   Branches   "
-            header2 += " Found  Excutd"
-            fmt_coverage += " %6d %6d"
-        header1 += " Percent"
-        header2 += " Covered"
-        fmt_coverage += " %7d%%"
-        if self.show_missing:
-            header1 += "          "
-            header2 += "   Missing"
-            fmt_coverage += "   %s"
-        rule = "-" * len(header1) + "\n"
-        header1 += "\n"
-        header2 += "\n"
-        fmt_coverage += "\n"
-
-        if not outfile:
-            outfile = sys.stdout
-
-        # Write the header
-        outfile.write(header1)
-        outfile.write(header2)
-        outfile.write(rule)
-
-        total = Numbers()
-        total_uncovered = 0
-
-        lines = []
-        for cu in self.code_units:
-            try:
-                analysis = self.coverage._analyze(cu)
-                nums = analysis.numbers
-                uncovered = nums.n_statements - nums.n_executed
-                total_uncovered += uncovered
-                args = (cu.name, uncovered, nums.n_executed)
-                if self.branches:
-                    args += (nums.n_branches, nums.n_executed_branches)
-                args += (nums.pc_covered,)
-                if self.show_missing:
-                    args += (analysis.missing_formatted(),)
-                if sortby == "covered":
-                    sortkey = nums.pc_covered
-                elif sortby == "uncovered":
-                    sortkey = uncovered
-                else:
-                    sortkey = cu.name
-                lines.append((sortkey, fmt_coverage % args))
-                total += nums
-            except KeyboardInterrupt: # pragma: no cover
-                raise
-            except:
-                if not self.ignore_errors:
-                    typ, msg = sys.exc_info()[:2]
-                    outfile.write(fmt_err % (cu.name, typ.__name__, msg))
-        lines.sort()
-        if sortby in ("uncovered", "covered"):
-            lines.reverse()
-        for sortkey,line in lines:
-            outfile.write(line)
-
-        if total.n_files > 1:
-            outfile.write(rule)
-            args = ("TOTAL", total_uncovered, total.n_executed)
-            if self.branches:
-                args += (total.n_branches, total.n_executed_branches)
-            args += (total.pc_covered,)
-            if self.show_missing:
-                args += ("",)
-            outfile.write(fmt_coverage % args)
-
-def report(o):
-    c = coverage()
-    c.load()
-    r = MyReporter(c, show_missing=False, ignore_errors=False)
-    r.report(sortby=o['sortby'])
-
-if __name__ == '__main__':
-    o = Options()
-    o.parseOptions()
-    report(o)
@@ -15,7 +15,7 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

DEB_INSTALL_DOCS_ALL := COPYING.TGPPL.html CREDITS NEWS README.txt relnotes.txt \
- docs misc/spacetime misc/cpu-watcher.tac
+ docs misc/operations_helpers/spacetime misc/operations_helpers/cpu-watcher.tac
DEB_COMPRESS_EXCLUDE := .tac

install/$(DEBNAME)::
@@ -15,7 +15,7 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

DEB_INSTALL_DOCS_ALL := COPYING.GPL COPYING.TGPPL.html CREDITS \
- NEWS README.txt relnotes.txt docs misc/spacetime misc/cpu-watcher.tac
+ NEWS README.txt relnotes.txt docs misc/operations_helpers/spacetime misc/operations_helpers/cpu-watcher.tac
DEB_COMPRESS_EXCLUDE := .tac

@@ -39,7 +39,7 @@ install/$(DEBNAME)::
	chmod +x $(STAGING_DIR)/usr/bin/tahoe.new
	mv $(STAGING_DIR)/usr/bin/tahoe.new $(STAGING_DIR)/usr/bin/tahoe

-	dh_install misc/munin/* usr/share/$(DEBNAME)/munin
+	dh_install misc/operations_helpers/munin/* usr/share/$(DEBNAME)/munin
	chmod +x $(STAGING_DIR)/usr/share/$(DEBNAME)/munin/*

	dh_pycentral
@@ -15,7 +15,7 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

DEB_INSTALL_DOCS_ALL := COPYING.GPL COPYING.TGPPL.html CREDITS \
- NEWS README.txt relnotes.txt docs misc/spacetime misc/cpu-watcher.tac
+ NEWS README.txt relnotes.txt docs misc/operations_helpers/spacetime misc/operations_helpers/cpu-watcher.tac
DEB_COMPRESS_EXCLUDE := .tac

@@ -39,7 +39,7 @@ install/$(DEBNAME)::
	chmod +x $(STAGING_DIR)/usr/bin/tahoe.new
	mv $(STAGING_DIR)/usr/bin/tahoe.new $(STAGING_DIR)/usr/bin/tahoe

-	dh_install misc/munin/* usr/share/$(DEBNAME)/munin
+	dh_install misc/operations_helpers/munin/* usr/share/$(DEBNAME)/munin
	chmod +x $(STAGING_DIR)/usr/share/$(DEBNAME)/munin/*

	dh_pycentral
@@ -15,7 +15,7 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

DEB_INSTALL_DOCS_ALL := COPYING.GPL COPYING.TGPPL.html CREDITS \
- NEWS README.txt relnotes.txt docs misc/spacetime misc/cpu-watcher.tac
+ NEWS README.txt relnotes.txt docs misc/operations_helpers/spacetime misc/operations_helpers/cpu-watcher.tac
DEB_COMPRESS_EXCLUDE := .tac

@@ -39,7 +39,7 @@ install/$(DEBNAME)::
	chmod +x $(STAGING_DIR)/usr/bin/tahoe.new
	mv $(STAGING_DIR)/usr/bin/tahoe.new $(STAGING_DIR)/usr/bin/tahoe

-	dh_install misc/munin/* usr/share/$(DEBNAME)/munin
+	dh_install misc/operations_helpers/munin/* usr/share/$(DEBNAME)/munin
	chmod +x $(STAGING_DIR)/usr/share/$(DEBNAME)/munin/*

	dh_pycentral
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-find helper/CHK_encoding -mtime +7 -print0 |xargs -0 rm
-find helper/CHK_incoming -mtime +14 -print0 |xargs -0 rm
@@ -1,5 +0,0 @@
-/test/
-/foolscap/
-/zfec/
-/allmydata/Crypto/
-/simplejson/
@@ -1,87 +0,0 @@
-#! /usr/bin/env python
-
-import os, sys, pickle
-
-def longest_common_prefix(elements):
-    if not elements:
-        return ""
-    prefix = elements[0]
-    for e in elements:
-        prefix = longest_common_prefix_2(prefix, e)
-    return prefix
-def longest_common_prefix_2(a, b):
-    maxlen = min(len(a), len(b))
-    for i in range(maxlen, 0, -1):
-        if a[:i] == b[:i]:
-            return a[:i]
-    return ""
-
-## def write_el(r2):
-##     filenames = sorted(r2.keys())
-##     out = open(".figleaf.el", "w")
-##     out.write("(setq figleaf-results '(\n")
-##     for f in filenames:
-##         linenumbers = r2[f]
-##         out.write(' ("%s" (%s))\n' % (f, " ".join([str(ln)
-##                                                    for ln in linenumbers])))
-##     out.write(" ))\n")
-##     out.close()
-
-def write_el(r2, source):
-    filenames = sorted(r2.keys())
-    out = open(".figleaf.el", "w")
-    out.write("""
-;; This is an elisp-readable form of the figleaf coverage data. It defines a
-;; single top-level hash table in which the load-path-relative filename (like
-;; allmydata/download.py) is the key, and the value is a three-element list.
-;; The first element of this list is a list of line numbers that represent
-;; actual code. The second is a list of line numbers for lines which got used
-;; during the unit test. The third is a list of line numbers for code lines
-;; that were not covered (since 'code' and 'covered' start as sets, this last
-;; list is equal to 'code - covered').
-
-""")
-    out.write("(let ((results (make-hash-table :test 'equal)))\n")
-    for f in filenames:
-        covered_linenumbers = r2[f]
-        code_linenumbers = source[f]
-        uncovered_code = code_linenumbers - covered_linenumbers
-        out.write(" (puthash \"%s\" '((%s) (%s) (%s)) results)\n"
-                  % (f,
-                     " ".join([str(ln) for ln in sorted(code_linenumbers)]),
-                     " ".join([str(ln) for ln in sorted(covered_linenumbers)]),
-                     " ".join([str(ln) for ln in sorted(uncovered_code)]),
-                     ))
-    out.write(" results)\n")
-    out.close()
-
-import figleaf
-
-def examine_source(filename):
-    f = open(filename, "r")
-    lines = figleaf.get_lines(f)
-    f.close()
-    return lines
-
-def main():
-    results = pickle.load(open(sys.argv[1], "rb"))
-    import_prefix = os.path.abspath(sys.argv[2])
-    if not import_prefix.endswith("/"):
-        import_prefix = import_prefix + "/"
-    plen = len(import_prefix)
-
-    r2 = {}
-    source = {}
-    filenames = sorted(results.keys())
-    here = os.getcwd()
-    for f in filenames:
-        if f.startswith(import_prefix):
-            short = f[plen:]
-            r2[short] = results[f]
-            source[short] = examine_source(f)
-    write_el(r2, source)
-
-if __name__ == '__main__':
-    main()
@@ -1,37 +0,0 @@
-import sys
-
-import os, sys
-from twisted.python.procutils import which
-
-def find_exe(exename):
-    """
-    Look for something named exename or exename + ".py".
-
-    This is a kludge.
-
-    @return: a list containing one element which is the quoted path to the
-    exename (if it is thought to be executable), or else the first element
-    being quoted sys.executable and the second element being the quoted path
-    to the exename + ".py", or else return False if one can't be found
-    """
-    exes = which(exename)
-    exe = exes and exes[0]
-    if not exe:
-        exe = os.path.join(sys.prefix, 'scripts', exename + '.py')
-    if os.path.exists(exe):
-        path, ext = os.path.splitext(exe)
-        if ext.lower() in [".exe", ".bat",]:
-            cmd = ['"' + exe + '"',]
-        else:
-            cmd = ['"' + sys.executable + '"', '"' + exe + '"',]
-        return cmd
-    else:
-        return False
-
-
-if __name__ == "__main__":
-    cmd = find_exe("trial")
-    if cmd:
-        print " ".join(cmd).replace("\\", "/")
-    else:
-        sys.exit(1)
misc/logtool.py (201 lines removed)
@@ -1,201 +0,0 @@
-#!/usr/bin/env python
-
-import os.path, time, pickle
-import foolscap
-from foolscap import RemoteInterface
-from foolscap.eventual import fireEventually
-from foolscap.schema import DictOf, Any
-from twisted.internet import reactor, defer
-from zope.interface import implements
-from twisted.python import usage
-#from twisted.python import log
-#import sys
-#log.startLogging(sys.stderr)
-
-class Options(usage.Options):
-    longdesc = """
-logtool tail FURL : follow logs of the target node
-logtool gather : run as a daemon, record all logs to the current directory
-logtool dump FILE : dump the logs recorded by 'logtool gather'
-"""
-
-    def parseArgs(self, mode, *args):
-        self.mode = mode
-        if mode == "tail":
-            target = args[0]
-            if target.startswith("pb:"):
-                self.target_furl = target
-            elif os.path.isfile(target):
-                self.target_furl = open(target, "r").read().strip()
-            elif os.path.isdir(target):
-                fn = os.path.join(target, "logport.furl")
-                self.target_furl = open(fn, "r").read().strip()
-            else:
-                raise ValueError("Can't use tail target: %s" % target)
-        elif mode == "dump":
-            self.dumpfile = args[0]
-
-
-class RILogObserver(RemoteInterface):
-    def msg(logmsg=DictOf(str, Any())):
-        return None
-class RISubscription(RemoteInterface):
-    pass
-
-class RILogPublisher(RemoteInterface):
-    def get_versions():
-        return DictOf(str, str)
-    def subscribe_to_all(observer=RILogObserver):
-        return RISubscription
-    def unsubscribe(subscription=Any()):
-        # I don't know how to get the constraint right: unsubscribe() should
-        # accept return value of subscribe_to_all()
-        return None
-
-class RILogGatherer(RemoteInterface):
-    def logport(nodeid=str, logport=RILogPublisher):
-        return None
-
-class LogPrinter(foolscap.Referenceable):
-    implements(RILogObserver)
-
-    def remote_msg(self, d):
-        print d
-
-class LogTail:
-
-    def start(self, target_furl):
-        print "Connecting.."
-        d = defer.maybeDeferred(self.setup_tub)
-        d.addCallback(self._tub_ready, target_furl)
-        return d
-
-    def setup_tub(self):
-        self._tub = foolscap.Tub()
-        self._tub.startService()
-
-    def _tub_ready(self, res, target_furl):
-        d = self._tub.getReference(target_furl)
-        d.addCallback(self._got_logpublisher)
-        return d
-
-    def _got_logpublisher(self, publisher):
-        print "Connected"
-        lp = LogPrinter()
-        d = publisher.callRemote("subscribe_to_all", lp)
-        return d
-
-    def remote_msg(self, d):
-        print d
-
-class LogSaver(foolscap.Referenceable):
-    implements(RILogObserver)
-    def __init__(self, nodeid, savefile):
-        self.nodeid = nodeid
-        self.f = savefile
-
-    def remote_msg(self, d):
-        e = {"from": self.nodeid,
-             "rx_time": time.time(),
-             "d": d,
-             }
-        pickle.dump(e, self.f)
-
-    def disconnected(self):
-        del self.f
-        from allmydata.util.idlib import shortnodeid_b2a
-        print "LOGPORT CLOSED", shortnodeid_b2a(self.nodeid)
-
-class LogGatherer(foolscap.Referenceable):
-    implements(RILogGatherer)
-
-    def start(self, res):
-        self._savefile = open("logs.pickle", "ab", 0)
-        d = self.setup_tub()
-        d.addCallback(self._tub_ready)
-        return d
-
-    def setup_tub(self):
-        from allmydata.util import iputil
-        self._tub = foolscap.Tub(certFile="gatherer.pem")
-        self._tub.startService()
-        portnumfile = "portnum"
-        try:
-            portnum = int(open(portnumfile, "r").read())
-        except (EnvironmentError, ValueError):
-            portnum = 0
-        self._tub.listenOn("tcp:%d" % portnum)
-        d = defer.maybeDeferred(iputil.get_local_addresses_async)
-        d.addCallback(self._set_location)
-        return d
-
-    def _set_location(self, local_addresses):
-        l = self._tub.getListeners()[0]
-        portnum = l.getPortnum()
-        portnumfile = "portnum"
-        open(portnumfile, "w").write("%d\n" % portnum)
-        local_addresses = [ "%s:%d" % (addr, portnum,)
-                            for addr in local_addresses ]
-        location = ",".join(local_addresses)
-        self._tub.setLocation(location)
-
-    def _tub_ready(self, res):
-        me = self._tub.registerReference(self, furlFile="log_gatherer.furl")
-        print "Gatherer waiting at:", me
-
-    def remote_logport(self, nodeid, publisher):
-        from allmydata.util.idlib import shortnodeid_b2a
-        short = shortnodeid_b2a(nodeid)
-        print "GOT LOGPORT", short
-        ls = LogSaver(nodeid, self._savefile)
-        publisher.callRemote("subscribe_to_all", ls)
-        publisher.notifyOnDisconnect(ls.disconnected)
-
-class LogDumper:
-    def start(self, options):
-        from allmydata.util.idlib import shortnodeid_b2a
-        fn = options.dumpfile
-        f = open(fn, "rb")
-        while True:
-            try:
-                e = pickle.load(f)
-                short = shortnodeid_b2a(e['from'])
-                when = e['rx_time']
-                print "%s %r: %r" % (short, when, e['d'])
-            except EOFError:
-                break
-
-class LogTool:
-
-    def run(self, options):
-        mode = options.mode
-        if mode == "tail":
-            lt = LogTail()
-            d = fireEventually(options.target_furl)
-            d.addCallback(lt.start)
-            d.addErrback(self._error)
-            print "starting.."
-            reactor.run()
-        elif mode == "gather":
-            lg = LogGatherer()
-            d = fireEventually()
-            d.addCallback(lg.start)
-            d.addErrback(self._error)
-            print "starting.."
-            reactor.run()
-        elif mode == "dump":
-            ld = LogDumper()
-            ld.start(options)
-        else:
-            print "unknown mode '%s'" % mode
-            raise NotImplementedError
-
-    def _error(self, f):
-        print "ERROR", f
-        reactor.stop()
-
-if __name__ == '__main__':
-    o = Options()
-    o.parseOptions()
-    lt = LogTool()
-    lt.run(o)
@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls data from the server in
-# misc/spacetime/diskwatcher.tac . It produces a graph of how much free space
+# misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much free space
# is left on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls data from the server in
-# misc/spacetime/diskwatcher.tac . It produces a graph of how much space is
+# misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much space is
# present on all disks across the grid, and how much space is actually being
# used. The plugin should be configured with env_url= pointing at the
# diskwatcher.tac webport.

@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls data from the server in
-# misc/spacetime/diskwatcher.tac . It produces a graph of how much disk space
+# misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much disk space
# is being used per unit time. The plugin should be configured with env_url=
# pointing at the diskwatcher.tac webport.

@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls data from the server in
-# misc/spacetime/diskwatcher.tac . It produces a graph of how much space is
+# misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much space is
# used on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls data from the server in
-# misc/spacetime/diskwatcher.tac . It produces a graph of how much time is
+# misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much time is
# left before the grid fills up. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

@@ -1,7 +1,7 @@
#!/usr/bin/env python

# This is a munin plugin which pulls total-used data from the server in
-# misc/spacetime/diskwatcher.tac, and a total-deep-size number from custom
+# misc/operations_helpers/spacetime/diskwatcher.tac, and a total-deep-size number from custom
# PHP database-querying scripts on a different server. It produces a graph of
# how much garbage/overhead is present in the grid: the ratio of total-used
# over (total-deep-size*N/k), expressed as a percentage. No overhead would be
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-
-"""This program is a client that sends files to xfer-server.py. You give it
-the server's FURL, and it can put files into the server's targetdir (and
-nowhere else). When you want an unattended process on one machine to be able
-to place files in a remote directory, you could give its parent process an
-ssh account on the target, with an empty passphrase, but that provides too
-much power. This program is a least-privilege replacement for the ssh/scp
-approach.
-
-Give the client a FURL, or a file where the FURL is stored. You also give it
-the name of the local file to be transferred. The last component of the local
-pathname will be used as the remote filename.
-"""
-
-import os.path
-from twisted.internet import reactor
-from foolscap import UnauthenticatedTub
-from twisted.python import usage
-
-class Options(usage.Options):
-    synopsis = "xfer-client.py (--furl FURL | --furlfile furlfile) LOCALFILE"
-    optParameters = [
-        ["furl", "f", None,
-         "The server FURL. You must either provide --furl or --furlfile."],
-        ["furlfile", "l", None,
-         "A file containing the server FURL."],
-        ]
-    optFlags = [
-        ["quiet", "q", "Do not announce success."],
-        ]
-
-    def parseArgs(self, localfile):
-        self['localfile'] = localfile
-
-    def postOptions(self):
-        if not self["furl"] and not self["furlfile"]:
-            raise usage.UsageError("you must either provide --furl or --furlfile")
-        if not os.path.exists(self["localfile"]):
-            raise usage.UsageError("local file '%s' doesn't exist" % self["localfile"])
-
-opts = Options()
-opts.parseOptions()
-tub = UnauthenticatedTub()
-tub.startService()
-if opts["furl"]:
-    furl = opts["furl"]
-else:
-    furl = open(os.path.expanduser(opts["furlfile"]), "r").read().strip()
-remotename = os.path.basename(opts["localfile"])
-d = tub.getReference(furl)
-def _push(rref):
-    data = open(os.path.expanduser(opts["localfile"]), "r").read()
-    return rref.callRemote("putfile", remotename, data)
-d.addCallback(_push)
-def _success(res):
-    reactor.stop()
-    if not opts["quiet"]:
-        print "file transferred to %s" % remotename
-def _failure(f):
-    reactor.stop()
-    print "error while transferring file:"
-    print f
-d.addCallbacks(_success, _failure)
-
-reactor.run()
setup.py (2 lines changed)
@@ -205,7 +205,7 @@ class TestMacDiskImage(Command):
        pass
    def run(self):
        import sys
-        sys.path.append('misc')
+        sys.path.append(os.path.join('misc', 'build_helpers'))
        import test_mac_diskimage
        return test_mac_diskimage.test_mac_diskimage('Allmydata', version=self.distribution.metadata.version)