Merge remote-tracking branch 'origin/master' into switch-to-static-metadata-setuptools
commit 3a26c6a36b
|
@ -1,7 +1,7 @@
|
|||
ARG TAG
|
||||
FROM debian:${TAG}
|
||||
ARG PYTHON_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||
ENV VIRTUALENV_PATH /tmp/venv
|
||||
# This will get updated by the CircleCI checkout step.
|
||||
|
@ -18,15 +18,11 @@ RUN apt-get --quiet update && \
|
|||
libffi-dev \
|
||||
libssl-dev \
|
||||
libyaml-dev \
|
||||
virtualenv
|
||||
virtualenv \
|
||||
tor
|
||||
|
||||
# Get the project source. This is better than it seems. CircleCI will
|
||||
# *update* this checkout on each job run, saving us more time per-job.
|
||||
COPY . ${BUILD_SRC_ROOT}
|
||||
|
||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
|
||||
|
||||
# Only the integration tests currently need this but it doesn't hurt to always
|
||||
# have it present and it's simpler than building a whole extra image just for
|
||||
# the integration tests.
|
||||
RUN ${BUILD_SRC_ROOT}/integration/install-tor.sh
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
ARG TAG
|
||||
FROM centos:${TAG}
|
||||
FROM oraclelinux:${TAG}
|
||||
ARG PYTHON_VERSION
|
||||
|
||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||
|
@ -13,7 +13,6 @@ RUN yum install --assumeyes \
|
|||
sudo \
|
||||
make automake gcc gcc-c++ \
|
||||
python${PYTHON_VERSION} \
|
||||
python${PYTHON_VERSION}-devel \
|
||||
libffi-devel \
|
||||
openssl-devel \
|
||||
libyaml \
|
|
@ -1,7 +1,7 @@
|
|||
ARG TAG
|
||||
FROM ubuntu:${TAG}
|
||||
ARG PYTHON_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||
ENV VIRTUALENV_PATH /tmp/venv
|
||||
# This will get updated by the CircleCI checkout step.
|
||||
|
|
|
@ -15,53 +15,38 @@ workflows:
|
|||
ci:
|
||||
jobs:
|
||||
# Start with jobs testing various platforms.
|
||||
- "debian-9":
|
||||
{}
|
||||
- "debian-10":
|
||||
requires:
|
||||
- "debian-9"
|
||||
{}
|
||||
- "debian-11":
|
||||
{}
|
||||
|
||||
- "ubuntu-20-04":
|
||||
{}
|
||||
- "ubuntu-18-04":
|
||||
requires:
|
||||
- "ubuntu-20-04"
|
||||
- "ubuntu-16-04":
|
||||
requires:
|
||||
- "ubuntu-20-04"
|
||||
|
||||
- "fedora-29":
|
||||
{}
|
||||
- "fedora-28":
|
||||
requires:
|
||||
- "fedora-29"
|
||||
|
||||
- "centos-8":
|
||||
# Equivalent to RHEL 8; CentOS 8 is dead.
|
||||
- "oraclelinux-8":
|
||||
{}
|
||||
|
||||
- "nixos-19-09":
|
||||
{}
|
||||
- "nixos":
|
||||
name: "NixOS 21.05"
|
||||
nixpkgs: "21.05"
|
||||
|
||||
- "nixos-21-05":
|
||||
{}
|
||||
- "nixos":
|
||||
name: "NixOS 21.11"
|
||||
nixpkgs: "21.11"
|
||||
|
||||
# Test against PyPy 2.7
|
||||
- "pypy27-buster":
|
||||
{}
|
||||
|
||||
# Just one Python 3.6 configuration while the port is in-progress.
|
||||
- "python36":
|
||||
{}
|
||||
# Eventually, test against PyPy 3.8
|
||||
#- "pypy27-buster":
|
||||
# {}
|
||||
|
||||
# Other assorted tasks and configurations
|
||||
- "lint":
|
||||
{}
|
||||
- "codechecks3":
|
||||
- "codechecks":
|
||||
{}
|
||||
- "pyinstaller":
|
||||
{}
|
||||
- "deprecations":
|
||||
{}
|
||||
- "c-locale":
|
||||
{}
|
||||
# Any locale other than C or UTF-8.
|
||||
|
@ -72,7 +57,7 @@ workflows:
|
|||
requires:
|
||||
# If the unit test suite doesn't pass, don't bother running the
|
||||
# integration tests.
|
||||
- "debian-9"
|
||||
- "debian-11"
|
||||
|
||||
- "typechecks":
|
||||
{}
|
||||
|
@ -102,24 +87,19 @@ workflows:
|
|||
# https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
|
||||
- "build-image-debian-10": &DOCKERHUB_CONTEXT
|
||||
context: "dockerhub-auth"
|
||||
- "build-image-debian-9":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-ubuntu-16-04":
|
||||
- "build-image-debian-11":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-ubuntu-18-04":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-ubuntu-20-04":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-fedora-28":
|
||||
- "build-image-fedora-35":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-fedora-29":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-centos-8":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-pypy27-buster":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
- "build-image-python36-ubuntu":
|
||||
- "build-image-oraclelinux-8":
|
||||
<<: *DOCKERHUB_CONTEXT
|
||||
# Restore later as PyPy38
|
||||
#- "build-image-pypy27-buster":
|
||||
# <<: *DOCKERHUB_CONTEXT
|
||||
|
||||
|
||||
jobs:
|
||||
|
@ -145,10 +125,10 @@ jobs:
|
|||
# Since this job is never scheduled this step is never run so the
|
||||
# actual value here is irrelevant.
|
||||
|
||||
lint:
|
||||
codechecks:
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "circleci/python:2"
|
||||
image: "cimg/python:3.9"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
|
@ -163,28 +143,10 @@ jobs:
|
|||
command: |
|
||||
~/.local/bin/tox -e codechecks
|
||||
|
||||
codechecks3:
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "circleci/python:3"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
|
||||
- run:
|
||||
name: "Install tox"
|
||||
command: |
|
||||
pip install --user tox
|
||||
|
||||
- run:
|
||||
name: "Static-ish code checks"
|
||||
command: |
|
||||
~/.local/bin/tox -e codechecks3
|
||||
|
||||
pyinstaller:
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "circleci/python:2"
|
||||
image: "cimg/python:3.9"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
|
@ -207,10 +169,10 @@ jobs:
|
|||
command: |
|
||||
dist/Tahoe-LAFS/tahoe --version
|
||||
|
||||
debian-9: &DEBIAN
|
||||
debian-10: &DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/debian:9-py2.7"
|
||||
image: "tahoelafsci/debian:10-py3.7"
|
||||
user: "nobody"
|
||||
|
||||
environment: &UTF_8_ENVIRONMENT
|
||||
|
@ -224,7 +186,7 @@ jobs:
|
|||
# filenames and argv).
|
||||
LANG: "en_US.UTF-8"
|
||||
# Select a tox environment to run for this job.
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py27"
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py37"
|
||||
# Additional arguments to pass to tox.
|
||||
TAHOE_LAFS_TOX_ARGS: ""
|
||||
# The path in which test artifacts will be placed.
|
||||
|
@ -292,29 +254,29 @@ jobs:
|
|||
/tmp/venv/bin/codecov
|
||||
fi
|
||||
|
||||
|
||||
debian-10:
|
||||
debian-11:
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/debian:10-py2.7"
|
||||
image: "tahoelafsci/debian:11-py3.9"
|
||||
user: "nobody"
|
||||
|
||||
|
||||
pypy27-buster:
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/pypy:buster-py2"
|
||||
user: "nobody"
|
||||
|
||||
environment:
|
||||
<<: *UTF_8_ENVIRONMENT
|
||||
# We don't do coverage since it makes PyPy far too slow:
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27"
|
||||
# Since we didn't collect it, don't upload it.
|
||||
UPLOAD_COVERAGE: ""
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py39"
|
||||
|
||||
# Restore later using PyPy3.8
|
||||
# pypy27-buster:
|
||||
# <<: *DEBIAN
|
||||
# docker:
|
||||
# - <<: *DOCKERHUB_AUTH
|
||||
# image: "tahoelafsci/pypy:buster-py2"
|
||||
# user: "nobody"
|
||||
# environment:
|
||||
# <<: *UTF_8_ENVIRONMENT
|
||||
# # We don't do coverage since it makes PyPy far too slow:
|
||||
# TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27"
|
||||
# # Since we didn't collect it, don't upload it.
|
||||
# UPLOAD_COVERAGE: ""
|
||||
|
||||
c-locale:
|
||||
<<: *DEBIAN
|
||||
|
@ -332,22 +294,12 @@ jobs:
|
|||
# aka "Latin 1"
|
||||
LANG: "en_US.ISO-8859-1"
|
||||
|
||||
|
||||
deprecations:
|
||||
<<: *DEBIAN
|
||||
|
||||
environment:
|
||||
<<: *UTF_8_ENVIRONMENT
|
||||
# Select the deprecations tox environments.
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "deprecations,upcoming-deprecations"
|
||||
# Put the logs somewhere we can report them.
|
||||
TAHOE_LAFS_WARNINGS_LOG: "/tmp/artifacts/deprecation-warnings.log"
|
||||
# The deprecations tox environments don't do coverage measurement.
|
||||
UPLOAD_COVERAGE: ""
|
||||
|
||||
|
||||
integration:
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/debian:11-py3.9"
|
||||
user: "nobody"
|
||||
|
||||
environment:
|
||||
<<: *UTF_8_ENVIRONMENT
|
||||
|
@ -362,28 +314,11 @@ jobs:
|
|||
- run: *SETUP_VIRTUALENV
|
||||
- run: *RUN_TESTS
|
||||
|
||||
|
||||
ubuntu-16-04:
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:16.04-py2.7"
|
||||
user: "nobody"
|
||||
|
||||
|
||||
ubuntu-18-04: &UBUNTU_18_04
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:18.04-py2.7"
|
||||
user: "nobody"
|
||||
|
||||
|
||||
python36:
|
||||
<<: *UBUNTU_18_04
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:18.04-py3"
|
||||
image: "tahoelafsci/ubuntu:18.04-py3.7"
|
||||
user: "nobody"
|
||||
|
||||
environment:
|
||||
|
@ -392,24 +327,28 @@ jobs:
|
|||
# this reporter on Python 3. So drop that and just specify the
|
||||
# reporter.
|
||||
TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py36"
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py37"
|
||||
|
||||
|
||||
ubuntu-20-04:
|
||||
<<: *DEBIAN
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:20.04"
|
||||
image: "tahoelafsci/ubuntu:20.04-py3.9"
|
||||
user: "nobody"
|
||||
environment:
|
||||
<<: *UTF_8_ENVIRONMENT
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py39"
|
||||
|
||||
|
||||
centos-8: &RHEL_DERIV
|
||||
oraclelinux-8: &RHEL_DERIV
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/centos:8-py2"
|
||||
image: "tahoelafsci/oraclelinux:8-py3.8"
|
||||
user: "nobody"
|
||||
|
||||
environment: *UTF_8_ENVIRONMENT
|
||||
environment:
|
||||
<<: *UTF_8_ENVIRONMENT
|
||||
TAHOE_LAFS_TOX_ENVIRONMENT: "py38"
|
||||
|
||||
# pip cannot install packages if the working directory is not readable.
|
||||
# We want to run a lot of steps as nobody instead of as root.
|
||||
|
@ -425,36 +364,65 @@ jobs:
|
|||
- store_artifacts: *STORE_OTHER_ARTIFACTS
|
||||
- run: *SUBMIT_COVERAGE
|
||||
|
||||
|
||||
fedora-28:
|
||||
fedora-35:
|
||||
<<: *RHEL_DERIV
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/fedora:28-py"
|
||||
image: "tahoelafsci/fedora:35-py3"
|
||||
user: "nobody"
|
||||
|
||||
nixos:
|
||||
parameters:
|
||||
nixpkgs:
|
||||
description: >-
|
||||
Reference the name of a niv-managed nixpkgs source (see `niv show`
|
||||
and nix/sources.json)
|
||||
type: "string"
|
||||
|
||||
fedora-29:
|
||||
<<: *RHEL_DERIV
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/fedora:29-py"
|
||||
user: "nobody"
|
||||
|
||||
nixos-19-09: &NIXOS
|
||||
docker:
|
||||
# Run in a highly Nix-capable environment.
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "nixorg/nix:circleci"
|
||||
image: "nixos/nix:2.3.16"
|
||||
|
||||
environment:
|
||||
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"
|
||||
SOURCE: "nix/"
|
||||
# CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and
|
||||
# allows us to push to CACHIX_NAME. We only need this set for
|
||||
# `cachix use` in this step.
|
||||
CACHIX_NAME: "tahoe-lafs-opensource"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
- "run":
|
||||
name: "Build and Test"
|
||||
# The nixos/nix image does not include ssh. Install it so the
|
||||
# `checkout` step will succeed. We also want cachix for
|
||||
# Nix-friendly caching.
|
||||
name: "Install Basic Dependencies"
|
||||
command: |
|
||||
nix-env \
|
||||
--file https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz \
|
||||
--install \
|
||||
-A openssh cachix bash
|
||||
|
||||
- "checkout"
|
||||
|
||||
- run:
|
||||
name: "Cachix setup"
|
||||
# Record the store paths that exist before we did much. There's no
|
||||
# reason to cache these, they're either in the image or have to be
|
||||
# retrieved before we can use cachix to restore from cache.
|
||||
command: |
|
||||
cachix use "${CACHIX_NAME}"
|
||||
nix path-info --all > /tmp/store-path-pre-build
|
||||
|
||||
- "run":
|
||||
# The Nix package doesn't know how to do this part, unfortunately.
|
||||
name: "Generate version"
|
||||
command: |
|
||||
nix-shell \
|
||||
-p 'python3.withPackages (ps: [ ps.setuptools ])' \
|
||||
--run 'python setup.py update_version'
|
||||
|
||||
- "run":
|
||||
name: "Build"
|
||||
command: |
|
||||
# CircleCI build environment looks like it has a zillion and a
|
||||
# half cores. Don't let Nix autodetect this high core count
|
||||
|
@ -466,22 +434,55 @@ jobs:
|
|||
# build a couple simple little dependencies that don't take
|
||||
# advantage of multiple cores and we get a little speedup by doing
|
||||
# them in parallel.
|
||||
nix-build --cores 3 --max-jobs 2 "$SOURCE"
|
||||
nix-build --cores 3 --max-jobs 2 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"
|
||||
|
||||
nixos-21-05:
|
||||
<<: *NIXOS
|
||||
- "run":
|
||||
name: "Test"
|
||||
command: |
|
||||
# Let it go somewhat wild for the test suite itself
|
||||
nix-build --cores 8 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" tests.nix
|
||||
|
||||
environment:
|
||||
# Note this doesn't look more similar to the 19.09 NIX_PATH URL because
|
||||
# there was some internal shuffling by the NixOS project about how they
|
||||
# publish stable revisions.
|
||||
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs/archive/d32b07e6df276d78e3640eb43882b80c9b2b3459.tar.gz"
|
||||
SOURCE: "nix/py3.nix"
|
||||
- run:
|
||||
# Send any new store objects to cachix.
|
||||
name: "Push to Cachix"
|
||||
when: "always"
|
||||
command: |
|
||||
# Cribbed from
|
||||
# https://circleci.com/blog/managing-secrets-when-you-have-pull-requests-from-outside-contributors/
|
||||
if [ -n "$CIRCLE_PR_NUMBER" ]; then
|
||||
# I'm sure you're thinking "CIRCLE_PR_NUMBER must just be the
|
||||
# number of the PR being built". Sorry, dear reader, you have
|
||||
# guessed poorly. It is also conditionally set based on whether
|
||||
# this is a PR from a fork or not.
|
||||
#
|
||||
# https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
|
||||
echo "Skipping Cachix push for forked PR."
|
||||
else
|
||||
# If this *isn't* a build from a fork then we have the Cachix
|
||||
# write key in our environment and we can push any new objects
|
||||
# to Cachix.
|
||||
#
|
||||
# To decide what to push, we inspect the list of store objects
|
||||
# that existed before and after we did most of our work. Any
|
||||
# that are new after the work is probably a useful thing to have
|
||||
# around so push it to the cache. We exclude all derivation
|
||||
# objects (.drv files) because they're cheap to reconstruct and
|
||||
# by the time you know their cache key you've already done all
|
||||
# the work anyway.
|
||||
#
|
||||
# This shell expression for finding the objects and pushing them
|
||||
# was from the Cachix docs:
|
||||
#
|
||||
# https://docs.cachix.org/continuous-integration-setup/circleci.html
|
||||
#
|
||||
# but they seem to have removed it now.
|
||||
bash -c "comm -13 <(sort /tmp/store-path-pre-build | grep -v '\.drv$') <(nix path-info --all | grep -v '\.drv$' | sort) | cachix push $CACHIX_NAME"
|
||||
fi
|
||||
|
||||
typechecks:
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:18.04-py3"
|
||||
image: "tahoelafsci/ubuntu:18.04-py3.7"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
|
@ -493,7 +494,7 @@ jobs:
|
|||
docs:
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "tahoelafsci/ubuntu:18.04-py3"
|
||||
image: "tahoelafsci/ubuntu:18.04-py3.7"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
|
@ -511,16 +512,19 @@ jobs:
|
|||
# https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
|
||||
docker:
|
||||
- <<: *DOCKERHUB_AUTH
|
||||
image: "docker:17.05.0-ce-git"
|
||||
# CircleCI build images; https://github.com/CircleCI-Public/cimg-base
|
||||
# for details.
|
||||
image: "cimg/base:2022.01"
|
||||
|
||||
environment:
|
||||
DISTRO: "tahoelafsci/<DISTRO>:foo-py2"
|
||||
TAG: "tahoelafsci/distro:<TAG>-py2"
|
||||
DISTRO: "tahoelafsci/<DISTRO>:foo-py3.9"
|
||||
TAG: "tahoelafsci/distro:<TAG>-py3.9"
|
||||
PYTHON_VERSION: "tahoelafsci/distro:tag-py<PYTHON_VERSION}"
|
||||
|
||||
steps:
|
||||
- "checkout"
|
||||
- "setup_remote_docker"
|
||||
- setup_remote_docker:
|
||||
version: "20.10.11"
|
||||
- run:
|
||||
name: "Log in to Dockerhub"
|
||||
command: |
|
||||
|
@ -547,26 +551,16 @@ jobs:
|
|||
environment:
|
||||
DISTRO: "debian"
|
||||
TAG: "10"
|
||||
PYTHON_VERSION: "2.7"
|
||||
PYTHON_VERSION: "3.7"
|
||||
|
||||
|
||||
build-image-debian-9:
|
||||
build-image-debian-11:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "debian"
|
||||
TAG: "9"
|
||||
PYTHON_VERSION: "2.7"
|
||||
|
||||
|
||||
build-image-ubuntu-16-04:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "ubuntu"
|
||||
TAG: "16.04"
|
||||
PYTHON_VERSION: "2.7"
|
||||
|
||||
TAG: "11"
|
||||
PYTHON_VERSION: "3.9"
|
||||
|
||||
build-image-ubuntu-18-04:
|
||||
<<: *BUILD_IMAGE
|
||||
|
@ -574,16 +568,7 @@ jobs:
|
|||
environment:
|
||||
DISTRO: "ubuntu"
|
||||
TAG: "18.04"
|
||||
PYTHON_VERSION: "2.7"
|
||||
|
||||
|
||||
build-image-python36-ubuntu:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "ubuntu"
|
||||
TAG: "18.04"
|
||||
PYTHON_VERSION: "3"
|
||||
PYTHON_VERSION: "3.7"
|
||||
|
||||
|
||||
build-image-ubuntu-20-04:
|
||||
|
@ -592,43 +577,32 @@ jobs:
|
|||
environment:
|
||||
DISTRO: "ubuntu"
|
||||
TAG: "20.04"
|
||||
PYTHON_VERSION: "2.7"
|
||||
PYTHON_VERSION: "3.9"
|
||||
|
||||
|
||||
build-image-centos-8:
|
||||
build-image-oraclelinux-8:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "centos"
|
||||
DISTRO: "oraclelinux"
|
||||
TAG: "8"
|
||||
PYTHON_VERSION: "2"
|
||||
PYTHON_VERSION: "3.8"
|
||||
|
||||
|
||||
build-image-fedora-28:
|
||||
build-image-fedora-35:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "fedora"
|
||||
TAG: "28"
|
||||
# The default on Fedora (this version anyway) is still Python 2.
|
||||
PYTHON_VERSION: ""
|
||||
TAG: "35"
|
||||
PYTHON_VERSION: "3"
|
||||
|
||||
# build-image-pypy27-buster:
|
||||
# <<: *BUILD_IMAGE
|
||||
|
||||
build-image-fedora-29:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "fedora"
|
||||
TAG: "29"
|
||||
|
||||
|
||||
build-image-pypy27-buster:
|
||||
<<: *BUILD_IMAGE
|
||||
|
||||
environment:
|
||||
DISTRO: "pypy"
|
||||
TAG: "buster"
|
||||
# We only have Python 2 for PyPy right now so there's no support for
|
||||
# setting up PyPy 3 in the image building toolchain. This value is just
|
||||
# for constructing the right Docker image tag.
|
||||
PYTHON_VERSION: "2"
|
||||
# environment:
|
||||
# DISTRO: "pypy"
|
||||
# TAG: "buster"
|
||||
# # We only have Python 2 for PyPy right now so there's no support for
|
||||
# # setting up PyPy 3 in the image building toolchain. This value is just
|
||||
# # for constructing the right Docker image tag.
|
||||
# PYTHON_VERSION: "2"
|
||||
|
|
|
@ -52,7 +52,7 @@ fi
|
|||
# This is primarily aimed at catching hangs on the PyPy job which runs for
|
||||
# about 21 minutes and then gets killed by CircleCI in a way that fails the
|
||||
# job and bypasses our "allowed failure" logic.
|
||||
TIMEOUT="timeout --kill-after 1m 15m"
|
||||
TIMEOUT="timeout --kill-after 1m 25m"
|
||||
|
||||
# Run the test suite as a non-root user. This is the expected usage; some
|
||||
# small areas of the test suite assume non-root privileges (such as unreadable
|
||||
|
|
|
@ -18,3 +18,9 @@ Examples of contributions include:
|
|||
|
||||
Before authoring or reviewing a patch,
|
||||
please familiarize yourself with the `Coding Standards <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_ and the `Contributor Code of Conduct <../docs/CODE_OF_CONDUCT.md>`_.
|
||||
|
||||
|
||||
🥳 First Contribution?
|
||||
======================
|
||||
|
||||
If you are committing to Tahoe for the very first time, consider adding your name to our contributor list in `CREDITS <../CREDITS>`__
|
||||
|
|
|
@ -6,6 +6,33 @@ on:
|
|||
- "master"
|
||||
pull_request:
|
||||
|
||||
# At the start of each workflow run, GitHub creates a unique
|
||||
# GITHUB_TOKEN secret to use in the workflow. It is a good idea for
|
||||
# this GITHUB_TOKEN to have the minimum of permissions. See:
|
||||
#
|
||||
# - https://docs.github.com/en/actions/security-guides/automatic-token-authentication
|
||||
# - https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
|
||||
#
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# Control to what degree jobs in this workflow will run concurrently with
|
||||
# other instances of themselves.
|
||||
#
|
||||
# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#concurrency
|
||||
concurrency:
|
||||
# We want every revision on master to run the workflow completely.
|
||||
# "head_ref" is not set for the "push" event but it is set for the
|
||||
# "pull_request" event. If it is set then it is the name of the branch and
|
||||
# we can use it to make sure each branch has only one active workflow at a
|
||||
# time. If it is not set then we can compute a unique string that gives
|
||||
# every master/push workflow its own group.
|
||||
group: "${{ github.head_ref || format('{0}-{1}', github.run_number, github.run_attempt) }}"
|
||||
|
||||
# Then, we say that if a new workflow wants to start in the same group as a
|
||||
# running workflow, the running workflow should be cancelled.
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
# Tell Hypothesis which configuration we want it to use.
|
||||
TAHOE_LAFS_HYPOTHESIS_PROFILE: "ci"
|
||||
|
@ -21,49 +48,35 @@ jobs:
|
|||
- windows-latest
|
||||
- ubuntu-latest
|
||||
python-version:
|
||||
- 2.7
|
||||
- 3.6
|
||||
- 3.7
|
||||
- 3.8
|
||||
- 3.9
|
||||
- "3.7"
|
||||
- "3.8"
|
||||
- "3.9"
|
||||
- "3.10"
|
||||
include:
|
||||
# On macOS don't bother with 3.6-3.8, just to get faster builds.
|
||||
- os: macos-10.15
|
||||
python-version: 2.7
|
||||
# On macOS don't bother with 3.7-3.8, just to get faster builds.
|
||||
- os: macos-latest
|
||||
python-version: 3.9
|
||||
|
||||
python-version: "3.9"
|
||||
- os: macos-latest
|
||||
python-version: "3.10"
|
||||
# We only support PyPy on Linux at the moment.
|
||||
- os: ubuntu-latest
|
||||
python-version: "pypy-3.7"
|
||||
- os: ubuntu-latest
|
||||
python-version: "pypy-3.8"
|
||||
|
||||
steps:
|
||||
# See https://github.com/actions/checkout. A fetch-depth of 0
|
||||
# fetches all tags and branches.
|
||||
- name: Check out Tahoe-LAFS sources
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
# To use pip caching with GitHub Actions in an OS-independent
|
||||
# manner, we need `pip cache dir` command, which became
|
||||
# available since pip v20.1+. At the time of writing this,
|
||||
# GitHub Actions offers pip v20.3.3 for both ubuntu-latest and
|
||||
# windows-latest, and pip v20.3.1 for macos-latest.
|
||||
- name: Get pip cache directory
|
||||
id: pip-cache
|
||||
run: |
|
||||
echo "::set-output name=dir::$(pip cache dir)"
|
||||
|
||||
# See https://github.com/actions/cache
|
||||
- name: Use pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.dir }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pip-
|
||||
cache: 'pip' # caching pip dependencies
|
||||
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
|
@ -77,13 +90,13 @@ jobs:
|
|||
run: python -m tox
|
||||
|
||||
- name: Upload eliot.log
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: eliot.log
|
||||
path: eliot.log
|
||||
|
||||
- name: Upload trial log
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: test.log
|
||||
path: _trial_temp/test.log
|
||||
|
@ -92,25 +105,6 @@ jobs:
|
|||
# Action for this, as of Jan 2021 it does not support Python coverage
|
||||
# files - only lcov files. Therefore, we use coveralls-python, the
|
||||
# coveralls.io-supplied Python reporter, for this.
|
||||
#
|
||||
# It is coveralls-python 1.x that has maintained compatibility
|
||||
# with Python 2, while coveralls-python 3.x is compatible with
|
||||
# Python 3. Sadly we can't use them both in the same workflow.
|
||||
#
|
||||
# The two versions of coveralls-python are somewhat mutually
|
||||
# incompatible. Mixing these two different versions when
|
||||
# reporting coverage to coveralls.io will lead to grief, since
|
||||
# they get job IDs in different fashion. If we use both
|
||||
# versions of coveralls in the same workflow, the finalizing
|
||||
# step will be able to mark only part of the jobs as done, and
|
||||
# the other part will be left hanging, never marked as done: it
|
||||
# does not matter if we make an API call or `coveralls --finish`
|
||||
# to indicate that CI has finished running.
|
||||
#
|
||||
# So we try to use the newer coveralls-python that is available
|
||||
# via Python 3 (which is present in GitHub Actions tool cache,
|
||||
# even when we're running Python 2.7 tests) throughout this
|
||||
# workflow.
|
||||
- name: "Report Coverage to Coveralls"
|
||||
run: |
|
||||
pip3 install --upgrade coveralls==3.0.1
|
||||
|
@ -161,22 +155,21 @@ jobs:
|
|||
matrix:
|
||||
os:
|
||||
- windows-latest
|
||||
- ubuntu-latest
|
||||
# 22.04 has some issue with Tor at the moment:
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
|
||||
- ubuntu-20.04
|
||||
python-version:
|
||||
- 2.7
|
||||
- 3.6
|
||||
- 3.7
|
||||
- 3.9
|
||||
include:
|
||||
# On macOS don't bother with 3.6, just to get faster builds.
|
||||
- os: macos-10.15
|
||||
python-version: 2.7
|
||||
# On macOS don't bother with 3.7, just to get faster builds.
|
||||
- os: macos-latest
|
||||
python-version: 3.9
|
||||
|
||||
steps:
|
||||
|
||||
- name: Install Tor [Ubuntu]
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
if: ${{ contains(matrix.os, 'ubuntu') }}
|
||||
run: sudo apt install tor
|
||||
|
||||
# TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744.
|
||||
|
@ -185,38 +178,24 @@ jobs:
|
|||
- name: Install Tor [macOS, ${{ matrix.python-version }} ]
|
||||
if: ${{ contains(matrix.os, 'macos') }}
|
||||
run: |
|
||||
brew extract --version 0.4.5.8 tor homebrew/cask
|
||||
brew install tor@0.4.5.8
|
||||
brew link --overwrite tor@0.4.5.8
|
||||
brew install tor
|
||||
|
||||
- name: Install Tor [Windows]
|
||||
if: matrix.os == 'windows-latest'
|
||||
uses: crazy-max/ghaction-chocolatey@v1
|
||||
uses: crazy-max/ghaction-chocolatey@v2
|
||||
with:
|
||||
args: install tor
|
||||
|
||||
- name: Check out Tahoe-LAFS sources
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v1
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Get pip cache directory
|
||||
id: pip-cache
|
||||
run: |
|
||||
echo "::set-output name=dir::$(pip cache dir)"
|
||||
|
||||
- name: Use pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.dir }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pip-
|
||||
cache: 'pip' # caching pip dependencies
|
||||
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
|
@ -226,16 +205,16 @@ jobs:
|
|||
- name: Display tool versions
|
||||
run: python misc/build_helpers/show-tool-versions.py
|
||||
|
||||
- name: Run "Python 2 integration tests"
|
||||
if: ${{ matrix.python-version == '2.7' }}
|
||||
- name: Run "Python 3 integration tests"
|
||||
env:
|
||||
# On macOS this is necessary to ensure unix socket paths for tor
|
||||
# aren't too long. On Windows tox won't pass it through so it has no
|
||||
# effect. On Linux it doesn't make a difference one way or another.
|
||||
TMPDIR: "/tmp"
|
||||
run: tox -e integration
|
||||
|
||||
- name: Run "Python 3 integration tests"
|
||||
if: ${{ matrix.python-version != '2.7' }}
|
||||
run: tox -e integration3
|
||||
|
||||
- name: Upload eliot.log in case of failure
|
||||
uses: actions/upload-artifact@v1
|
||||
uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: integration.eliot.json
|
||||
|
@ -251,32 +230,20 @@ jobs:
|
|||
- windows-latest
|
||||
- ubuntu-latest
|
||||
python-version:
|
||||
- 2.7
|
||||
- 3.9
|
||||
|
||||
steps:
|
||||
|
||||
- name: Check out Tahoe-LAFS sources
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v1
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Get pip cache directory
|
||||
id: pip-cache
|
||||
run: |
|
||||
echo "::set-output name=dir::$(pip cache dir)"
|
||||
|
||||
- name: Use pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.dir }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pip-
|
||||
cache: 'pip' # caching pip dependencies
|
||||
|
||||
- name: Install Python packages
|
||||
run: |
|
||||
|
@ -294,7 +261,7 @@ jobs:
|
|||
run: dist/Tahoe-LAFS/tahoe --version
|
||||
|
||||
- name: Upload PyInstaller package
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: Tahoe-LAFS-${{ matrix.os }}-Python-${{ matrix.python-version }}
|
||||
path: dist/Tahoe-LAFS-*-*.*
|
||||
|
|
|
@ -29,8 +29,7 @@ zope.interface-*.egg
|
|||
.pc
|
||||
|
||||
/src/allmydata/test/plugins/dropin.cache
|
||||
/_trial_temp*
|
||||
/_test_memory/
|
||||
**/_trial_temp*
|
||||
/tmp*
|
||||
/*.patch
|
||||
/dist/
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
version: 2
|
||||
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
|
@ -1,29 +0,0 @@
|
|||
Development Status :: 5 - Production/Stable
|
||||
Environment :: Console
|
||||
Environment :: Web Environment
|
||||
License :: OSI Approved :: GNU General Public License (GPL)
|
||||
License :: DFSG approved
|
||||
License :: Other/Proprietary License
|
||||
Intended Audience :: Developers
|
||||
Intended Audience :: End Users/Desktop
|
||||
Intended Audience :: System Administrators
|
||||
Operating System :: Microsoft
|
||||
Operating System :: Microsoft :: Windows
|
||||
Operating System :: Unix
|
||||
Operating System :: POSIX :: Linux
|
||||
Operating System :: POSIX
|
||||
Operating System :: MacOS :: MacOS X
|
||||
Operating System :: OS Independent
|
||||
Natural Language :: English
|
||||
Programming Language :: C
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
Topic :: Utilities
|
||||
Topic :: System :: Systems Administration
|
||||
Topic :: System :: Filesystems
|
||||
Topic :: System :: Distributed Computing
|
||||
Topic :: Software Development :: Libraries
|
||||
Topic :: System :: Archiving :: Backup
|
||||
Topic :: System :: Archiving :: Mirroring
|
||||
Topic :: System :: Archiving
|
CREDITS
|
@ -240,3 +240,27 @@ N: Lukas Pirl
|
|||
E: tahoe@lukas-pirl.de
|
||||
W: http://lukas-pirl.de
|
||||
D: Buildslaves (Debian, Fedora, CentOS; 2016-2021)
|
||||
|
||||
N: Anxhelo Lushka
|
||||
E: anxhelo1995@gmail.com
|
||||
D: Web site design and updates
|
||||
|
||||
N: Fon E. Noel
|
||||
E: fenn25.fn@gmail.com
|
||||
D: bug-fixes and refactoring
|
||||
|
||||
N: Jehad Baeth
|
||||
E: jehad@leastauthority.com
|
||||
D: Documentation improvement
|
||||
|
||||
N: May-Lee Sia
|
||||
E: mayleesia@gmail.com
|
||||
D: Community-manager and documentation improvements
|
||||
|
||||
N: Yash Nayani
|
||||
E: yashaswi.nram@gmail.com
|
||||
D: Installation Guide improvements
|
||||
|
||||
N: Florian Sesser
|
||||
E: florian@private.storage
|
||||
D: OpenMetrics support
|
Makefile
|
@ -17,7 +17,7 @@ PYTHON=python
|
|||
export PYTHON
|
||||
PYFLAKES=flake8
|
||||
export PYFLAKES
|
||||
VIRTUAL_ENV=./.tox/py27
|
||||
VIRTUAL_ENV=./.tox/py37
|
||||
SOURCES=src/allmydata static misc setup.py
|
||||
APPNAME=tahoe-lafs
|
||||
TEST_SUITE=allmydata
|
||||
|
@ -35,7 +35,7 @@ test: .tox/create-venvs.log
|
|||
# Run codechecks first since it takes the least time to report issues early.
|
||||
tox --develop -e codechecks
|
||||
# Run all the test environments in parallel to reduce run-time
|
||||
tox --develop -p auto -e 'py27,py36,pypy27'
|
||||
tox --develop -p auto -e 'py37'
|
||||
.PHONY: test-venv-coverage
|
||||
## Run all tests with coverage collection and reporting.
|
||||
test-venv-coverage:
|
||||
|
@ -51,7 +51,7 @@ test-venv-coverage:
|
|||
.PHONY: test-py3-all
|
||||
## Run all tests under Python 3
|
||||
test-py3-all: .tox/create-venvs.log
|
||||
tox --develop -e py36 allmydata
|
||||
tox --develop -e py37 allmydata
|
||||
|
||||
# This is necessary only if you want to automatically produce a new
|
||||
# _version.py file from the current git history (without doing a build).
|
||||
|
@ -136,37 +136,12 @@ count-lines:
|
|||
# Here is a list of testing tools that can be run with 'python' from a
|
||||
# virtualenv in which Tahoe has been installed. There used to be Makefile
|
||||
# targets for each, but the exact path to a suitable python is now up to the
|
||||
# developer. But as a hint, after running 'tox', ./.tox/py27/bin/python will
|
||||
# developer. But as a hint, after running 'tox', ./.tox/py37/bin/python will
|
||||
# probably work.
|
||||
|
||||
# src/allmydata/test/bench_dirnode.py
|
||||
|
||||
|
||||
# The check-speed and check-grid targets are disabled, since they depend upon
|
||||
# the pre-located $(TAHOE) executable that was removed when we switched to
|
||||
# tox. They will eventually be resurrected as dedicated tox environments.
|
||||
|
||||
# The check-speed target uses a pre-established client node to run a canned
|
||||
# set of performance tests against a test network that is also
|
||||
# pre-established (probably on a remote machine). Provide it with the path to
|
||||
# a local directory where this client node has been created (and populated
|
||||
# with the necessary FURLs of the test network). This target will start that
|
||||
# client with the current code and then run the tests. Afterwards it will
|
||||
# stop the client.
|
||||
#
|
||||
# The 'sleep 5' is in there to give the new client a chance to connect to its
|
||||
# storageservers, since check_speed.py has no good way of doing that itself.
|
||||
|
||||
##.PHONY: check-speed
|
||||
##check-speed: .built
|
||||
## if [ -z '$(TESTCLIENTDIR)' ]; then exit 1; fi
|
||||
## @echo "stopping any leftover client code"
|
||||
## -$(TAHOE) stop $(TESTCLIENTDIR)
|
||||
## $(TAHOE) start $(TESTCLIENTDIR)
|
||||
## sleep 5
|
||||
## $(TAHOE) @src/allmydata/test/check_speed.py $(TESTCLIENTDIR)
|
||||
## $(TAHOE) stop $(TESTCLIENTDIR)
|
||||
|
||||
# The check-grid target also uses a pre-established client node, along with a
|
||||
# long-term directory that contains some well-known files. See the docstring
|
||||
# in src/allmydata/test/check_grid.py to see how to set this up.
|
||||
|
@ -195,12 +170,11 @@ test-clean:
|
|||
# Use 'make distclean' instead to delete all generated files.
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf build _trial_temp _test_memory .built
|
||||
rm -rf build _trial_temp .built
|
||||
rm -f `find src *.egg -name '*.so' -or -name '*.pyc'`
|
||||
rm -rf support dist
|
||||
rm -rf `ls -d *.egg | grep -vEe"setuptools-|setuptools_darcs-|darcsver-"`
|
||||
rm -rf *.pyc
|
||||
rm -f bin/tahoe bin/tahoe.pyscript
|
||||
rm -f *.pkg
|
||||
|
||||
.PHONY: distclean
|
||||
|
@ -250,3 +224,62 @@ src/allmydata/_version.py:
|
|||
|
||||
.tox/create-venvs.log: tox.ini setup.py
|
||||
tox --notest -p all | tee -a "$(@)"
|
||||
|
||||
|
||||
# to make a new release:
|
||||
# - create a ticket for the release in Trac
|
||||
# - ensure local copy is up-to-date
|
||||
# - create a branch like "XXXX.release" from up-to-date master
|
||||
# - in the branch, run "make release"
|
||||
# - run "make release-test"
|
||||
# - perform any other sanity-checks on the release
|
||||
# - run "make release-upload"
|
||||
# Note that several commands below hard-code "meejah"; if you are
|
||||
# someone else please adjust them.
|
||||
release:
|
||||
@echo "Is checkout clean?"
|
||||
git diff-files --quiet
|
||||
git diff-index --quiet --cached HEAD --
|
||||
|
||||
@echo "Clean docs build area"
|
||||
rm -rf docs/_build/
|
||||
|
||||
@echo "Install required build software"
|
||||
python3 -m pip install --editable .[build]
|
||||
|
||||
@echo "Test README"
|
||||
python3 setup.py check -r -s
|
||||
|
||||
@echo "Update NEWS"
|
||||
python3 -m towncrier build --yes --version `python3 misc/build_helpers/update-version.py --no-tag`
|
||||
git add -u
|
||||
git commit -m "update NEWS for release"
|
||||
|
||||
# note that this always bumps the "middle" number, e.g. from 1.17.1 -> 1.18.0
|
||||
# and produces a tag into the Git repository
|
||||
@echo "Bump version and create tag"
|
||||
python3 misc/build_helpers/update-version.py
|
||||
|
||||
@echo "Build and sign wheel"
|
||||
python3 setup.py bdist_wheel
|
||||
gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl
|
||||
ls dist/*`git describe | cut -b 12-`*
|
||||
|
||||
@echo "Build and sign source-dist"
|
||||
python3 setup.py sdist
|
||||
gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz
|
||||
ls dist/*`git describe | cut -b 12-`*
|
||||
|
||||
# basically just a bare-minimum smoke-test that it installs and runs
|
||||
release-test:
|
||||
gpg --verify dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc
|
||||
gpg --verify dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc
|
||||
virtualenv testmf_venv
|
||||
testmf_venv/bin/pip install dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl
|
||||
testmf_venv/bin/tahoe --version
|
||||
rm -rf testmf_venv
|
||||
|
||||
release-upload:
|
||||
scp dist/*`git describe | cut -b 12-`* meejah@tahoe-lafs.org:/home/source/downloads
|
||||
git push origin_push tahoe-lafs-`git describe | cut -b 12-`
|
||||
twine upload dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc
|
||||
|
|
README.rst
|
@ -53,12 +53,11 @@ For more detailed instructions, read `Installing Tahoe-LAFS <docs/Installation/i
|
|||
|
||||
Once ``tahoe --version`` works, see `How to Run Tahoe-LAFS <docs/running.rst>`__ to learn how to set up your first Tahoe-LAFS node.
|
||||
|
||||
🐍 Python 3 Support
|
||||
--------------------
|
||||
🐍 Python 2
|
||||
-----------
|
||||
|
||||
Python 3 support has been introduced starting with Tahoe-LAFS 1.16.0, alongside Python 2.
|
||||
System administrators are advised to start running Tahoe on Python 3 and should expect Python 2 support to be dropped in a future version.
|
||||
Please, feel free to file issues if you run into bugs while running Tahoe on Python 3.
|
||||
Python 3.7 or later is now required.
|
||||
If you are still using Python 2.7, use Tahoe-LAFS version 1.17.1.
|
||||
|
||||
|
||||
🤖 Issues
|
||||
|
@ -95,7 +94,14 @@ As a community-driven open source project, Tahoe-LAFS welcomes contributions of
|
|||
|
||||
- `Patch reviews <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/PatchReviewProcess>`__
|
||||
|
||||
Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`__ and the `Contributor Code of Conduct <docs/CODE_OF_CONDUCT.md>`__.
|
||||
Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`__ and the `Contributor Code of Conduct <docs/CODE_OF_CONDUCT.md>`__.
|
||||
|
||||
|
||||
🥳 First Contribution?
|
||||
----------------------
|
||||
|
||||
If you are committing to Tahoe for the very first time, it's required that you add your name to our contributor list in `CREDITS <CREDITS>`__. Please ensure that this addition has its own commit within your first contribution.
|
||||
|
||||
|
||||
🤝 Supporters
|
||||
--------------
|
||||
|
|
|
@ -0,0 +1,102 @@
|
|||
let
|
||||
# sources.nix contains information about which versions of some of our
|
||||
# dependencies we should use. since we use it to pin nixpkgs and the PyPI
|
||||
# package database, roughly all the rest of our dependencies are *also*
|
||||
# pinned - indirectly.
|
||||
#
|
||||
# sources.nix is managed using a tool called `niv`. as an example, to
|
||||
# update to the most recent version of nixpkgs from the 21.11 maintenance
|
||||
# release, in the top-level tahoe-lafs checkout directory you run:
|
||||
#
|
||||
# niv update nixpkgs-21.11
|
||||
#
|
||||
# or, to update the PyPI package database -- which is necessary to make any
|
||||
# newly released packages visible -- you likewise run:
|
||||
#
|
||||
# niv update pypi-deps-db
|
||||
#
|
||||
# niv also supports choosing a specific revision, following a different
|
||||
# branch, etc. find complete documentation for the tool at
|
||||
# https://github.com/nmattia/niv
|
||||
sources = import nix/sources.nix;
|
||||
in
|
||||
{
|
||||
pkgsVersion ? "nixpkgs-21.11" # a string which chooses a nixpkgs from the
|
||||
# niv-managed sources data
|
||||
|
||||
, pkgs ? import sources.${pkgsVersion} { } # nixpkgs itself
|
||||
|
||||
, pypiData ? sources.pypi-deps-db # the pypi package database snapshot to use
|
||||
# for dependency resolution
|
||||
|
||||
, pythonVersion ? "python37" # a string choosing the python derivation from
|
||||
# nixpkgs to target
|
||||
|
||||
, extras ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
|
||||
# the dependencies of which the resulting package
|
||||
# will also depend on. Include all of the runtime
|
||||
# extras by default because the incremental cost of
|
||||
# including them is a lot smaller than the cost of
|
||||
# re-building the whole thing to add them.
|
||||
|
||||
, mach-nix ? import sources.mach-nix { # the mach-nix package to use to build
|
||||
# the tahoe-lafs package
|
||||
inherit pkgs pypiData;
|
||||
python = pythonVersion;
|
||||
}
|
||||
}:
|
||||
# The project name, version, and most other metadata are automatically
|
||||
# extracted from the source. Some requirements are not properly extracted
|
||||
# and those cases are handled below. The version can only be extracted if
|
||||
# `setup.py update_version` has been run (this is not at all ideal but it
|
||||
# seems difficult to fix) - so for now just be sure to run that first.
|
||||
mach-nix.buildPythonPackage rec {
|
||||
# Define the location of the Tahoe-LAFS source to be packaged. Clean up
|
||||
# as many of the non-source files (eg the `.git` directory, `~` backup
|
||||
# files, nix's own `result` symlink, etc) as possible to avoid needing to
|
||||
# re-build when files that make no difference to the package have changed.
|
||||
src = pkgs.lib.cleanSource ./.;
|
||||
|
||||
# Select whichever package extras were requested.
|
||||
inherit extras;
|
||||
|
||||
# Define some extra requirements that mach-nix does not automatically detect
|
||||
# from inspection of the source. We typically don't need to put version
|
||||
# constraints on any of these requirements. The pypi-deps-db we're
|
||||
# operating with makes dependency resolution deterministic so as long as it
|
||||
# works once it will always work. It could be that in the future we update
|
||||
# pypi-deps-db and an incompatibility arises - in which case it would make
|
||||
# sense to apply some version constraints here.
|
||||
requirementsExtra = ''
|
||||
# mach-nix does not yet support pyproject.toml which means it misses any
|
||||
# build-time requirements of our dependencies which are declared in such a
|
||||
# file. Tell it about them here.
|
||||
setuptools_rust
|
||||
|
||||
# mach-nix does not yet parse environment markers (e.g. "python > '3.0'")
|
||||
# correctly. It misses all of our requirements which have an environment marker.
|
||||
# Duplicate them here.
|
||||
foolscap
|
||||
eliot
|
||||
pyrsistent
|
||||
collections-extended
|
||||
'';
|
||||
|
||||
# Specify where mach-nix should find packages for our Python dependencies.
|
||||
# There are some reasonable defaults so we only need to specify certain
|
||||
# packages where the default configuration runs into some issue.
|
||||
providers = {
|
||||
};
|
||||
|
||||
# Define certain overrides to the way Python dependencies are built.
|
||||
_ = {
|
||||
# Remove a click-default-group patch for a test suite problem which no
|
||||
# longer applies because the project apparently no longer has a test suite
|
||||
# in its source distribution.
|
||||
click-default-group.patches = [];
|
||||
};
|
||||
|
||||
passthru.meta.mach-nix = {
|
||||
inherit providers _;
|
||||
};
|
||||
}
|
|
@ -28,15 +28,15 @@ To install Tahoe-LAFS on Windows:
|
|||
3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**.
|
||||
|
||||
4. Start PowerShell and enter the following command to verify python installation::
|
||||
|
||||
|
||||
python --version
|
||||
|
||||
5. Enter the following command to install Tahoe-LAFS::
|
||||
|
||||
|
||||
pip install tahoe-lafs
|
||||
|
||||
6. Verify installation by checking for the version::
|
||||
|
||||
|
||||
tahoe --version
|
||||
|
||||
If you want to hack on Tahoe's source code, you can install Tahoe in a ``virtualenv`` on your Windows machine. To learn more, see :doc:`install-on-windows`.
|
||||
|
@ -56,13 +56,13 @@ If you are working on MacOS or a Linux distribution which does not have Tahoe-LA
|
|||
* **pip**: Most python installations already include `pip`. However, if your installation does not, see `pip installation <https://pip.pypa.io/en/stable/installing/>`_.
|
||||
|
||||
2. Install Tahoe-LAFS using pip::
|
||||
|
||||
|
||||
pip install tahoe-lafs
|
||||
|
||||
3. Verify installation by checking for the version::
|
||||
|
||||
|
||||
tahoe --version
|
||||
|
||||
If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`.
|
||||
If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`.
|
||||
|
||||
You can always write to the `tahoe-dev mailing list <https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev>`_ or chat on the `Libera.chat IRC <irc://irc.libera.chat/%23tahoe-lafs>`_ if you are not able to get Tahoe-LAFS up and running on your deployment.
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
|
||||
import psutil
|
||||
import filelock
|
||||
|
||||
|
||||
def can_spawn_tahoe(pidfile):
|
||||
"""
|
||||
Determine if we can spawn a Tahoe-LAFS for the given pidfile. That
|
||||
pidfile may be deleted if it is stale.
|
||||
|
||||
:param pathlib.Path pidfile: the file to check, that is the Path
|
||||
to "running.process" in a Tahoe-LAFS configuration directory
|
||||
|
||||
:returns bool: True if we can spawn `tahoe run` here
|
||||
"""
|
||||
lockpath = pidfile.parent / (pidfile.name + ".lock")
|
||||
with filelock.FileLock(lockpath):
|
||||
try:
|
||||
with pidfile.open("r") as f:
|
||||
pid, create_time = f.read().strip().split(" ", 1)
|
||||
except FileNotFoundError:
|
||||
return True
|
||||
|
||||
# somewhat interesting: we have a pidfile
|
||||
pid = int(pid)
|
||||
create_time = float(create_time)
|
||||
|
||||
try:
|
||||
proc = psutil.Process(pid)
|
||||
# most interesting case: there _is_ a process running at the
|
||||
# recorded PID -- but did it just happen to get that PID, or
|
||||
# is it the very same one that wrote the file?
|
||||
if create_time == proc.create_time():
|
||||
# _not_ stale! another instance is still running against
|
||||
# this configuration
|
||||
return False
|
||||
|
||||
except psutil.NoSuchProcess:
|
||||
pass
|
||||
|
||||
# the file is stale
|
||||
pidfile.unlink()
|
||||
return True
|
||||
|
||||
|
||||
from pathlib import Path
|
||||
print("can spawn?", can_spawn_tahoe(Path("running.process")))
|
|
@ -63,7 +63,7 @@ release = u'1.x'
|
|||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
language = "en"
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
|
|
|
@ -47,8 +47,8 @@ servers must be configured with a way to first authenticate a user (confirm
|
|||
that a prospective client has a legitimate claim to whatever authorities we
|
||||
might grant a particular user), and second to decide what directory cap
|
||||
should be used as the root directory for a log-in by the authenticated user.
|
||||
A username and password can be used; as of Tahoe-LAFS v1.11, RSA or DSA
|
||||
public key authentication is also supported.
|
||||
As of Tahoe-LAFS v1.17,
|
||||
RSA/DSA public key authentication is the only supported mechanism.
|
||||
|
||||
Tahoe-LAFS provides two mechanisms to perform this user-to-cap mapping.
|
||||
The first (recommended) is a simple flat file with one account per line.
|
||||
|
@ -59,20 +59,14 @@ Creating an Account File
|
|||
|
||||
To use the first form, create a file (for example ``BASEDIR/private/accounts``)
|
||||
in which each non-comment/non-blank line is a space-separated line of
|
||||
(USERNAME, PASSWORD, ROOTCAP), like so::
|
||||
(USERNAME, KEY-TYPE, PUBLIC-KEY, ROOTCAP), like so::
|
||||
|
||||
% cat BASEDIR/private/accounts
|
||||
# This is a password line: username password cap
|
||||
alice password URI:DIR2:ioej8xmzrwilg772gzj4fhdg7a:wtiizszzz2rgmczv4wl6bqvbv33ag4kvbr6prz3u6w3geixa6m6a
|
||||
bob sekrit URI:DIR2:6bdmeitystckbl9yqlw7g56f4e:serp5ioqxnh34mlbmzwvkp3odehsyrr7eytt5f64we3k9hhcrcja
|
||||
|
||||
# This is a public key line: username keytype pubkey cap
|
||||
# (Tahoe-LAFS v1.11 or later)
|
||||
carol ssh-rsa AAAA... URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa
|
||||
|
||||
For public key authentication, the keytype may be either "ssh-rsa" or "ssh-dsa".
|
||||
To avoid ambiguity between passwords and public key types, a password cannot
|
||||
start with "ssh-".
|
||||
The key type may be either "ssh-rsa" or "ssh-dsa".
|
||||
|
||||
Now add an ``accounts.file`` directive to your ``tahoe.cfg`` file, as described in
|
||||
the next sections.
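For orientation, a minimal sketch of what that configuration can end up looking like is shown below; apart from ``accounts.file`` itself, the section name, port, and key paths here are illustrative assumptions rather than something this change prescribes::

    [sftpd]
    enabled = true
    port = tcp:8022
    host_pubkey_file = private/ssh_host_rsa_key.pub
    host_privkey_file = private/ssh_host_rsa_key
    accounts.file = private/accounts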
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
Preparing to Authenticate Release (Setting up GPG)
|
||||
--------------------------------------------------
|
||||
|
||||
In order to keep releases authentic, releases must be signed before being
|
||||
published. This ensures that users of Tahoe are able to verify that the version of Tahoe
|
||||
they are using comes from a trusted, or at the very least known, source.
|
||||
|
||||
Authentication is done using ``GPG``, an implementation of ``OpenPGP``. To complete
|
||||
the release steps you will need to install the ``GPG`` software and set up a key (identity).
|
||||
|
||||
- `Download <https://www.gnupg.org/download/>`__ and install GPG for your operating system.
|
||||
- Generate a key pair using ``gpg --gen-key``. *You will be asked some questions to personalize your key configuration.*
|
||||
|
||||
You might take additional steps including:
|
||||
|
||||
- Setting up a revocation certificate (in case you lose your secret key)
|
||||
- Backing up your key pair
|
||||
- Uploading your fingerprint to a keyserver such as `openpgp.org <https://keys.openpgp.org/>`__
|
|
@ -29,6 +29,7 @@ Contents:
|
|||
contributing
|
||||
CODE_OF_CONDUCT
|
||||
release-checklist
|
||||
gpg-setup
|
||||
|
||||
servers
|
||||
helper
|
||||
|
|
|
@ -30,15 +30,15 @@ Glossary
|
|||
introducer
|
||||
a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers
|
||||
|
||||
fURL
|
||||
:ref:`fURLs <fURLs>`
|
||||
a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
|
||||
(the storage service is an example of such an object)
|
||||
|
||||
NURL
|
||||
:ref:`NURLs <NURLs>`
|
||||
a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap
|
||||
|
||||
swissnum
|
||||
a short random string which is part of a fURL and which acts as a shared secret to authorize clients to use a storage service
|
||||
a short random string which is part of a fURL/NURL and which acts as a shared secret to authorize clients to use a storage service
|
||||
|
||||
lease
|
||||
state associated with a share informing a storage server of the duration of storage desired by a client
|
||||
|
@ -211,15 +211,15 @@ To further clarify, consider this example.
|
|||
Alice operates a storage node.
|
||||
Alice generates a key pair and secures it properly.
|
||||
Alice generates a self-signed storage node certificate with the key pair.
|
||||
Alice's storage node announces (to an introducer) a fURL containing (among other information) the SPKI hash.
|
||||
Alice's storage node announces (to an introducer) a NURL containing (among other information) the SPKI hash.
|
||||
Imagine the SPKI hash is ``i5xb...``.
|
||||
This results in a fURL of ``pb://i5xb...@example.com:443/g3m5...#v=1``.
|
||||
This results in a NURL of ``pb://i5xb...@example.com:443/g3m5...#v=1``.
|
||||
Bob creates a client node pointed at the same introducer.
|
||||
Bob's client node receives the announcement from Alice's storage node
|
||||
(indirected through the introducer).
|
||||
|
||||
Bob's client node recognizes the fURL as referring to an HTTP-dialect server due to the ``v=1`` fragment.
|
||||
Bob's client node can now perform a TLS handshake with a server at the address in the fURL location hints
|
||||
Bob's client node recognizes the NURL as referring to an HTTP-dialect server due to the ``v=1`` fragment.
|
||||
Bob's client node can now perform a TLS handshake with a server at the address in the NURL location hints
|
||||
(``example.com:443`` in this example).
|
||||
Following the above described validation procedures,
|
||||
Bob's client node can determine whether it has reached Alice's storage node or not.
|
||||
|
@ -230,7 +230,7 @@ Additionally,
|
|||
by continuing to interact using TLS,
|
||||
Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**.
|
||||
|
||||
Bob's client further inspects the fURL for the *swissnum*.
|
||||
Bob's client further inspects the NURL for the *swissnum*.
|
||||
When Bob's client issues HTTP requests to Alice's storage node it includes the *swissnum* in its requests.
|
||||
**Storage authorization** has been achieved.
|
||||
|
||||
|
@ -266,8 +266,8 @@ Generation of a new certificate allows for certain non-optimal conditions to be
|
|||
* The ``commonName`` of ``newpb_thingy`` may be changed to a more descriptive value.
|
||||
* A ``notValidAfter`` field with a timestamp in the past may be updated.
|
||||
|
||||
Storage nodes will announce a new fURL for this new HTTP-based server.
|
||||
This fURL will be announced alongside their existing Foolscap-based server's fURL.
|
||||
Storage nodes will announce a new NURL for this new HTTP-based server.
|
||||
This NURL will be announced alongside their existing Foolscap-based server's fURL.
|
||||
Such an announcement will resemble this::
|
||||
|
||||
{
|
||||
|
@ -312,7 +312,7 @@ The follow sequence of events is likely:
|
|||
#. The client uses the information in its cache to open a Foolscap connection to the storage server.
|
||||
|
||||
Ideally,
|
||||
the client would not rely on an update from the introducer to give it the GBS fURL for the updated storage server.
|
||||
the client would not rely on an update from the introducer to give it the GBS NURL for the updated storage server.
|
||||
Therefore,
|
||||
when an updated client connects to a storage server using Foolscap,
|
||||
it should request the server's version information.
|
||||
|
@ -350,6 +350,11 @@ Because of the simple types used throughout
|
|||
and the equivalence described in `RFC 7049`_
|
||||
these examples should be representative regardless of which of these two encodings is chosen.
|
||||
|
||||
The one exception is sets.
|
||||
For CBOR messages, any sequence that is semantically a set (i.e. no repeated values allowed, order doesn't matter, and elements are hashable in Python) should be sent as a set.
|
||||
Tag 6.258 is used to indicate sets in CBOR; see `the CBOR registry <https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml>`_ for more details.
|
||||
Sets will be represented as JSON lists in examples because JSON doesn't support sets.
|
||||
|
||||
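For illustration, a minimal sketch of round-tripping such a set, assuming the third-party ``cbor2`` package (an assumption of this sketch, not a requirement stated here), which encodes Python sets using tag 258::

    import cbor2

    share_numbers = {1, 5}                 # semantically a set of share numbers

    encoded = cbor2.dumps(share_numbers)   # emitted as CBOR tag 258 wrapping an array
    decoded = cbor2.loads(encoded)         # decoded back to a Python set
    assert decoded == share_numbers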
HTTP Design
|
||||
~~~~~~~~~~~
|
||||
|
||||
|
@ -363,17 +368,35 @@ one branch contains all of the share data;
|
|||
another branch contains all of the lease data;
|
||||
etc.
|
||||
|
||||
Authorization is required for all endpoints.
|
||||
An ``Authorization`` header in requests is required for all endpoints.
|
||||
The standard HTTP authorization protocol is used.
|
||||
The authentication *type* used is ``Tahoe-LAFS``.
|
||||
The swissnum from the NURL used to locate the storage service is used as the *credentials*.
|
||||
If credentials are not presented or the swissnum is not associated with a storage service then no storage processing is performed and the request receives an ``UNAUTHORIZED`` response.
|
||||
If credentials are not presented or the swissnum is not associated with a storage service, then no storage processing is performed and the request receives a ``401 UNAUTHORIZED`` response.
|
||||
|
||||
There are also, for some endpoints, secrets sent via ``X-Tahoe-Authorization`` headers.
|
||||
If these are:
|
||||
|
||||
1. Missing.
|
||||
2. The wrong length.
|
||||
3. Not the expected kind of secret.
|
||||
4. Otherwise unparseable before they are actually semantically used.
|
||||
|
||||
the server will respond with ``400 BAD REQUEST``.
|
||||
401 is not used because this isn't an authorization problem; it is a "you sent garbage and should know better" bug.
|
||||
|
||||
If authorization using the secret fails, then a ``401 UNAUTHORIZED`` response should be sent.
|
||||
|
||||
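As a concrete illustration, here is a minimal client-side sketch of assembling these headers. The helper name is hypothetical; the swissnum is used verbatim from the NURL (its exact encoding is not specified in this excerpt), and the per-endpoint secrets are base64-encoded as the header examples below show::

    import base64

    def request_headers(swissnum, lease_renew_secret, lease_cancel_secret):
        def b64(raw):
            return base64.b64encode(raw).decode("ascii")
        # A list of pairs, since ``X-Tahoe-Authorization`` may appear more than once.
        return [
            # Standard HTTP authorization: type ``Tahoe-LAFS``, swissnum as credentials.
            ("Authorization", "Tahoe-LAFS " + swissnum),
            # Endpoint-specific secrets travel in ``X-Tahoe-Authorization`` headers.
            ("X-Tahoe-Authorization", "lease-renew-secret " + b64(lease_renew_secret)),
            ("X-Tahoe-Authorization", "lease-cancel-secret " + b64(lease_cancel_secret)),
        ]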
Encoding
|
||||
~~~~~~~~
|
||||
|
||||
* ``storage_index`` should be base32 encoded (RFC3548) in URLs.
|
||||
|
||||
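A minimal sketch of that encoding; rendering it lowercase and stripping ``=`` padding are assumptions based on Tahoe-LAFS' usual convention for storage indexes, not requirements stated above::

    import base64

    def encode_storage_index(storage_index: bytes) -> str:
        # RFC 3548 base32, lowercase and without padding (an assumption).
        return base64.b32encode(storage_index).decode("ascii").rstrip("=").lower()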
General
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/version``
|
||||
!!!!!!!!!!!!!!!!!!!
|
||||
``GET /storage/v1/version``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve information about the version of the storage server.
|
||||
Information is returned as an encoded mapping.
|
||||
|
@ -386,27 +409,28 @@ For example::
|
|||
"tolerates-immutable-read-overrun": true,
|
||||
"delete-mutable-shares-with-zero-length-writev": true,
|
||||
"fills-holes-with-zero-bytes": true,
|
||||
"prevents-read-past-end-of-share-data": true,
|
||||
"gbs-anonymous-storage-url": "pb://...#v=1"
|
||||
"prevents-read-past-end-of-share-data": true
|
||||
},
|
||||
"application-version": "1.13.0"
|
||||
}
|
||||
|
||||
``PUT /v1/lease/:storage_index``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
``PUT /storage/v1/lease/:storage_index``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Either renew or create a new lease on the bucket addressed by ``storage_index``.
|
||||
The details of the lease are encoded in the request body.
|
||||
|
||||
The renew secret and cancellation secret should be included as ``X-Tahoe-Authorization`` headers.
|
||||
For example::
|
||||
|
||||
{"renew-secret": "abcd", "cancel-secret": "efgh"}
|
||||
X-Tahoe-Authorization: lease-renew-secret <base64-lease-renew-secret>
|
||||
X-Tahoe-Authorization: lease-cancel-secret <base64-lease-cancel-secret>
|
||||
|
||||
If the ``renew-secret`` value matches an existing lease
|
||||
If the ``lease-renew-secret`` value matches an existing lease
|
||||
then the expiration time of that lease will be changed to 31 days after the time of this operation.
|
||||
If it does not match an existing lease
|
||||
then a new lease will be created with this ``renew-secret`` which expires 31 days after the time of this operation.
|
||||
then a new lease will be created with this ``lease-renew-secret`` which expires 31 days after the time of this operation.
|
||||
|
||||
``renew-secret`` and ``cancel-secret`` values must be 32 bytes long.
|
||||
``lease-renew-secret`` and ``lease-cancel-secret`` values must be 32 bytes long.
|
||||
The server treats them as opaque values.
|
||||
:ref:`Share Leases` gives details about how the Tahoe-LAFS storage client constructs these values.
|
||||
|
||||
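A server-side sketch of the renew-or-create rule above; the data structures and names are hypothetical, and only the 31 day duration comes from the text::

    from datetime import datetime, timedelta, timezone

    LEASE_DURATION = timedelta(days=31)

    def renew_or_create_lease(leases, renew_secret, cancel_secret, now=None):
        # ``leases`` is a hypothetical dict keyed by lease-renew-secret.
        now = now or datetime.now(timezone.utc)
        if renew_secret in leases:
            # Matching secret: push the existing lease's expiration out 31 days.
            leases[renew_secret]["expires"] = now + LEASE_DURATION
        else:
            # No match: create a new lease expiring 31 days from now.
            leases[renew_secret] = {
                "cancel-secret": cancel_secret,
                "expires": now + LEASE_DURATION,
            }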
|
@ -423,8 +447,10 @@ In these cases the server takes no action and returns ``NOT FOUND``.
|
|||
Discussion
|
||||
``````````
|
||||
|
||||
We considered an alternative where ``renew-secret`` and ``cancel-secret`` are placed in query arguments on the request path.
|
||||
We chose to put these values into the request body to make the URL simpler.
|
||||
We considered an alternative where ``lease-renew-secret`` and ``lease-cancel-secret`` are placed in query arguments on the request path.
|
||||
This increases chances of leaking secrets in logs.
|
||||
Putting the secrets in the body reduces the chances of leaking secrets,
|
||||
but eventually we chose headers as the least likely information to be logged.
|
||||
|
||||
Several behaviors here are blindly copied from the Foolscap-based storage server protocol.
|
||||
|
||||
|
@ -441,8 +467,8 @@ Immutable
|
|||
Writing
|
||||
~~~~~~~
|
||||
|
||||
``POST /v1/immutable/:storage_index``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
``POST /storage/v1/immutable/:storage_index``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Initialize an immutable storage index with some buckets.
|
||||
The buckets may have share data written to them once.
|
||||
|
@ -450,18 +476,34 @@ A lease is also created for the shares.
|
|||
Details of the buckets to create are encoded in the request body.
|
||||
For example::
|
||||
|
||||
{"renew-secret": "efgh", "cancel-secret": "ijkl",
|
||||
"share-numbers": [1, 7, ...], "allocated-size": 12345}
|
||||
{"share-numbers": [1, 7, ...], "allocated-size": 12345}
|
||||
|
||||
The request must include ``X-Tahoe-Authorization`` HTTP headers that set the various secrets (upload, lease renewal, lease cancellation) that will later be used to authorize various operations.
|
||||
For example::
|
||||
|
||||
X-Tahoe-Authorization: lease-renew-secret <base64-lease-renew-secret>
|
||||
X-Tahoe-Authorization: lease-cancel-secret <base64-lease-cancel-secret>
|
||||
X-Tahoe-Authorization: upload-secret <base64-upload-secret>
|
||||
|
||||
The response body includes encoded information about the created buckets.
|
||||
For example::
|
||||
|
||||
{"already-have": [1, ...], "allocated": [7, ...]}
|
||||
|
||||
The upload secret is an opaque *byte* string.
|
||||
|
||||
Handling repeat calls:
|
||||
|
||||
* If the same API call is repeated with the same upload secret, the response is the same and no change is made to server state.
|
||||
This is necessary to ensure retries work in the face of lost responses from the server.
|
||||
* If the API call is made with a different upload secret, this implies a new client, perhaps because the old client died.
|
||||
Or it may happen because the client wants to upload a different share number than a previous client.
|
||||
New shares will be created and existing shares will be unchanged, regardless of whether the upload secret matches or not.
|
||||
|
||||
Discussion
|
||||
``````````
|
||||
|
||||
We considered making this ``POST /v1/immutable`` instead.
|
||||
We considered making this ``POST /storage/v1/immutable`` instead.
|
||||
The motivation was to keep *storage index* out of the request URL.
|
||||
Request URLs have an elevated chance of being logged by something.
|
||||
We were concerned that having the *storage index* logged may increase some risks.
|
||||
|
@ -482,13 +524,27 @@ The response includes ``already-have`` and ``allocated`` for two reasons:
|
|||
This might be because a server has become unavailable and a remaining server needs to store more shares for the upload.
|
||||
It could also just be that the client's preferred servers have changed.
|
||||
|
||||
``PATCH /v1/immutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
Regarding upload secrets,
|
||||
the goal is for uploading and aborting (see next sections) to be authenticated by more than just the storage index.
|
||||
In the future, we may want to generate them in a way that allows resuming/canceling when the client has issues.
|
||||
In the short term, they can just be a random byte string.
|
||||
The primary security constraint is that each upload to each server has its own unique upload key,
|
||||
tied to uploading that particular storage index to this particular server.
|
||||
|
||||
Rejected designs for upload secrets:
|
||||
|
||||
* Upload secret per share number.
|
||||
In order to make the secret unguessable by attackers, which includes other servers,
|
||||
it must contain randomness.
|
||||
Randomness means there is no need to have a secret per share, since adding share-specific content to randomness doesn't actually make the secret any better.
|
||||
|
||||
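A client-side sketch of generating such an upload secret, per the short-term guidance above; the helper name and the 32 byte length are assumptions (the required length is not stated in this excerpt)::

    import base64
    import secrets

    def new_upload_secret() -> bytes:
        # "They can just be a random byte string": one fresh secret per upload
        # of a given storage index to a given server.
        return secrets.token_bytes(32)

    # Value for an ``X-Tahoe-Authorization: upload-secret ...`` header.
    header_value = "upload-secret " + base64.b64encode(new_upload_secret()).decode("ascii")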
``PATCH /storage/v1/immutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Write data for the indicated share.
|
||||
The share number must belong to the storage index.
|
||||
The request body is the raw share data (i.e., ``application/octet-stream``).
|
||||
*Content-Range* requests are encouraged for large transfers to allow partially complete uploads to be resumed.
|
||||
*Content-Range* requests are required; for large transfers this allows partially complete uploads to be resumed.
|
||||
For example,
|
||||
a 1MiB share can be divided into eight separate 128KiB chunks.
|
||||
Each chunk can be uploaded in a separate request.
|
||||
|
@ -498,6 +554,12 @@ If any one of these requests fails then at most 128KiB of upload work needs to b
|
|||
The server must recognize when all of the data has been received and mark the share as complete
|
||||
(which it can do because it was informed of the size when the storage index was initialized).
|
||||
|
||||
The request must include an ``X-Tahoe-Authorization`` header that includes the upload secret::
|
||||
|
||||
X-Tahoe-Authorization: upload-secret <base64-upload-secret>
|
||||
|
||||
Responses:
|
||||
|
||||
* When a chunk that does not complete the share is successfully uploaded the response is ``OK``.
|
||||
The response body indicates the range of share data that has yet to be uploaded.
|
||||
That is::
|
||||
|
@ -517,20 +579,6 @@ The server must recognize when all of the data has been received and mark the sh
|
|||
the response is ``CONFLICT``.
|
||||
At this point the only thing to do is abort the upload and start from scratch (see below).
|
||||
|
||||
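A client-side sketch of the chunked upload loop described above, assuming the third-party ``requests`` package; the helper name is hypothetical and ``headers`` is expected to already carry the ``Authorization`` and ``X-Tahoe-Authorization: upload-secret ...`` headers::

    import requests

    def upload_share(base_url, storage_index, share_number, data, headers,
                     chunk_size=128 * 1024):
        url = "{}/storage/v1/immutable/{}/{}".format(
            base_url, storage_index, share_number)
        total = len(data)
        for start in range(0, total, chunk_size):
            chunk = data[start:start + chunk_size]
            end = start + len(chunk)
            merged = dict(headers)
            # Content-Range uses an inclusive end, e.g. "bytes 0-131071/1048576".
            merged["Content-Range"] = "bytes {}-{}/{}".format(start, end - 1, total)
            response = requests.patch(url, data=chunk, headers=merged)
            # 200 OK means more data is still expected; the final chunk is
            # acknowledged with 201 CREATED (see the worked example below).
            response.raise_for_status()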
``PUT /v1/immutable/:storage_index/:share_number/abort``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
This cancels an *in-progress* upload.
|
||||
|
||||
The response code:
|
||||
|
||||
* When the upload is still in progress and therefore the abort has succeeded,
|
||||
the response is ``OK``.
|
||||
Future uploads can start from scratch with no pre-existing upload state stored on the server.
|
||||
* If the uploaded has already finished, the response is 405 (Method Not Allowed)
|
||||
and no change is made.
|
||||
|
||||
|
||||
Discussion
|
||||
``````````
|
||||
|
||||
|
@ -549,12 +597,31 @@ From RFC 7231::
|
|||
PATCH method defined in [RFC5789]).
|
||||
|
||||
|
||||
``POST /v1/immutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Advise the server the data read from the indicated share was corrupt.
|
||||
The request body includes an human-meaningful string with details about the corruption.
|
||||
It also includes potentially important details about the share.
|
||||
``PUT /storage/v1/immutable/:storage_index/:share_number/abort``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
This cancels an *in-progress* upload.
|
||||
|
||||
The request must include an ``X-Tahoe-Authorization`` header that includes the upload secret::
|
||||
|
||||
X-Tahoe-Authorization: upload-secret <base64-upload-secret>
|
||||
|
||||
The response code:
|
||||
|
||||
* When the upload is still in progress and therefore the abort has succeeded,
|
||||
the response is ``OK``.
|
||||
Future uploads can start from scratch with no pre-existing upload state stored on the server.
|
||||
* If the upload has already finished, the response is 405 (Method Not Allowed)
|
||||
and no change is made.
|
||||
|
||||
|
||||
``POST /storage/v1/immutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Advise the server the data read from the indicated share was corrupt. The
|
||||
request body includes a human-meaningful text string with details about the
|
||||
corruption. It also includes potentially important details about the share.
|
||||
|
||||
For example::
|
||||
|
||||
|
@ -562,25 +629,35 @@ For example::
|
|||
|
||||
.. share-type, storage-index, and share-number are inferred from the URL
|
||||
|
||||
The response code is OK (200) by default, or NOT FOUND (404) if the share
|
||||
couldn't be found.
|
||||
|
||||
Reading
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/immutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
``GET /storage/v1/immutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve a list indicating all shares available for the indicated storage index.
|
||||
For example::
|
||||
Retrieve a list (semantically, a set) indicating all shares available for the
|
||||
indicated storage index. For example::
|
||||
|
||||
[1, 5]
|
||||
|
||||
``GET /v1/immutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
An unknown storage index results in an empty list.
|
||||
|
||||
``GET /storage/v1/immutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Read a contiguous sequence of bytes from one share in one bucket.
|
||||
The response body is the raw share data (i.e., ``application/octet-stream``).
|
||||
The ``Range`` header may be used to request exactly one ``bytes`` range.
|
||||
The ``Range`` header may be used to request exactly one ``bytes`` range, in which case the response code will be 206 (partial content).
|
||||
Interpretation and response behavior is as specified in RFC 7233 § 4.1.
|
||||
Multiple ranges in a single request are *not* supported.
|
||||
Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported.
|
||||
|
||||
If the requested range reads beyond the end of the data, the response may be shorter than the requested range.
|
||||
The resulting ``Content-Range`` header will be consistent with the returned data.
|
||||
|
||||
If the response to a query is an empty range, the ``NO CONTENT`` (204) response code will be used.
|
||||
|
||||
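A client-side sketch of issuing such a range read and interpreting the response codes above, again assuming the ``requests`` package and a hypothetical helper name::

    import requests

    def read_share_range(base_url, storage_index, share_number, offset, size, headers):
        url = "{}/storage/v1/immutable/{}/{}".format(
            base_url, storage_index, share_number)
        merged = dict(headers)
        # Exactly one closed bytes range, as required above.
        merged["Range"] = "bytes={}-{}".format(offset, offset + size - 1)
        response = requests.get(url, headers=merged)
        if response.status_code == 204:
            return b""      # empty range: nothing stored here
        response.raise_for_status()
        # A 206 (partial content) body may be shorter than requested if the
        # range ran past the end of the share data.
        return response.content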
Discussion
|
||||
``````````
|
||||
|
@ -609,8 +686,8 @@ Mutable
|
|||
Writing
|
||||
~~~~~~~
|
||||
|
||||
``POST /v1/mutable/:storage_index/read-test-write``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
``POST /storage/v1/mutable/:storage_index/read-test-write``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
General purpose read-test-and-write operation for mutable storage indexes.
|
||||
A mutable storage index is also called a "slot"
|
||||
|
@ -619,16 +696,16 @@ The first write operation on a mutable storage index creates it
|
|||
(that is,
|
||||
there is no separate "create this storage index" operation as there is for the immutable storage index type).
|
||||
|
||||
The request body includes the secrets necessary to rewrite to the shares
|
||||
along with test, read, and write vectors for the operation.
|
||||
The request must include ``X-Tahoe-Authorization`` headers with write enabler and lease secrets::
|
||||
|
||||
X-Tahoe-Authorization: write-enabler <base64-write-enabler-secret>
|
||||
X-Tahoe-Authorization: lease-cancel-secret <base64-lease-cancel-secret>
|
||||
X-Tahoe-Authorization: lease-renew-secret <base64-lease-renew-secret>
|
||||
|
||||
The request body includes test, read, and write vectors for the operation.
|
||||
For example::
|
||||
|
||||
{
|
||||
"secrets": {
|
||||
"write-enabler": "abcd",
|
||||
"lease-renew": "efgh",
|
||||
"lease-cancel": "ijkl"
|
||||
},
|
||||
"test-write-vectors": {
|
||||
0: {
|
||||
"test": [{
|
||||
|
@ -665,22 +742,31 @@ As a result, if there is no data at all, an empty bytestring is returned no matt
|
|||
Reading
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/mutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
``GET /storage/v1/mutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve a list indicating all shares available for the indicated storage index.
|
||||
For example::
|
||||
Retrieve a set indicating all shares available for the indicated storage index.
|
||||
For example (this is shown as a list, since it will be a list for JSON, but a set for CBOR)::
|
||||
|
||||
[1, 5]
|
||||
|
||||
``GET /v1/mutable/:storage_index?share=:s0&share=:sN&offset=:o1&size=:z0&offset=:oN&size=:zN``
|
||||
``GET /storage/v1/mutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Read data from the indicated mutable shares.
|
||||
Just like ``GET /v1/mutable/:storage_index``.
|
||||
Read data from the indicated mutable shares, just like ``GET /storage/v1/immutable/:storage_index/:share_number``.
|
||||
|
||||
``POST /v1/mutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
The ``Range`` header may be used to request exactly one ``bytes`` range, in which case the response code will be 206 (partial content).
|
||||
Interpretation and response behavior is as specified in RFC 7233 § 4.1.
|
||||
Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported.
|
||||
|
||||
If the requested range reads beyond the end of the data, the response may be shorter than the requested range.
|
||||
The resulting ``Content-Range`` header will be consistent with the returned data.
|
||||
|
||||
If the response to a query is an empty range, the ``NO CONTENT`` (204) response code will be used.
|
||||
|
||||
|
||||
``POST /storage/v1/mutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Advise the server the data read from the indicated share was corrupt.
|
||||
Just like the immutable version.
|
||||
|
@ -693,44 +779,61 @@ Immutable Data
|
|||
|
||||
1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded::
|
||||
|
||||
POST /v1/immutable/AAAAAAAAAAAAAAAA
|
||||
{"renew-secret": "efgh", "cancel-secret": "ijkl",
|
||||
"share-numbers": [1, 7], "allocated-size": 48}
|
||||
POST /storage/v1/immutable/AAAAAAAAAAAAAAAA
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
X-Tahoe-Authorization: lease-renew-secret efgh
|
||||
X-Tahoe-Authorization: lease-cancel-secret jjkl
|
||||
X-Tahoe-Authorization: upload-secret xyzf
|
||||
|
||||
{"share-numbers": [1, 7], "allocated-size": 48}
|
||||
|
||||
200 OK
|
||||
{"already-have": [1], "allocated": [7]}
|
||||
|
||||
#. Upload the content for immutable share ``7``::
|
||||
|
||||
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
Content-Range: bytes 0-15/48
|
||||
X-Tahoe-Authorization: upload-secret xyzf
|
||||
<first 16 bytes of share data>
|
||||
|
||||
200 OK
|
||||
{ "required": [ {"begin": 16, "end": 48 } ] }
|
||||
|
||||
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
Content-Range: bytes 16-31/48
|
||||
X-Tahoe-Authorization: upload-secret xyzf
|
||||
<second 16 bytes of share data>
|
||||
|
||||
200 OK
|
||||
{ "required": [ {"begin": 32, "end": 48 } ] }
|
||||
|
||||
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
Content-Range: bytes 32-47/48
|
||||
X-Tahoe-Authorization: upload-secret xyzf
|
||||
<final 16 bytes of share data>
|
||||
|
||||
201 CREATED
|
||||
|
||||
#. Download the content of the previously uploaded immutable share ``7``::
|
||||
|
||||
GET /v1/immutable/AAAAAAAAAAAAAAAA?share=7&offset=0&size=48
|
||||
GET /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
Range: bytes=0-47
|
||||
|
||||
200 OK
|
||||
Content-Range: bytes 0-47/48
|
||||
<complete 48 bytes of previously uploaded data>
|
||||
|
||||
#. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``::
|
||||
|
||||
PUT /v1/lease/AAAAAAAAAAAAAAAA
|
||||
{"renew-secret": "efgh", "cancel-secret": "ijkl"}
|
||||
PUT /storage/v1/lease/AAAAAAAAAAAAAAAA
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
X-Tahoe-Authorization: lease-cancel-secret jjkl
|
||||
X-Tahoe-Authorization: lease-renew-secret efgh
|
||||
|
||||
204 NO CONTENT
|
||||
|
||||
|
@ -742,13 +845,13 @@ The special test vector of size 1 but empty bytes will only pass
|
|||
if there is no existing share,
|
||||
otherwise it will read a byte which won't match `b""`::
|
||||
|
||||
POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
|
||||
POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
X-Tahoe-Authorization: write-enabler abcd
|
||||
X-Tahoe-Authorization: lease-cancel-secret efgh
|
||||
X-Tahoe-Authorization: lease-renew-secret ijkl
|
||||
|
||||
{
|
||||
"secrets": {
|
||||
"write-enabler": "abcd",
|
||||
"lease-renew": "efgh",
|
||||
"lease-cancel": "ijkl"
|
||||
},
|
||||
"test-write-vectors": {
|
||||
3: {
|
||||
"test": [{
|
||||
|
@ -774,13 +877,13 @@ otherwise it will read a byte which won't match `b""`::
|
|||
|
||||
#. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail)::
|
||||
|
||||
POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
|
||||
POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
X-Tahoe-Authorization: write-enabler abcd
|
||||
X-Tahoe-Authorization: lease-cancel-secret efgh
|
||||
X-Tahoe-Authorization: lease-renew-secret ijkl
|
||||
|
||||
{
|
||||
"secrets": {
|
||||
"write-enabler": "abcd",
|
||||
"lease-renew": "efgh",
|
||||
"lease-cancel": "ijkl"
|
||||
},
|
||||
"test-write-vectors": {
|
||||
3: {
|
||||
"test": [{
|
||||
|
@ -806,13 +909,20 @@ otherwise it will read a byte which won't match `b""`::
|
|||
|
||||
#. Download the contents of share number ``3``::
|
||||
|
||||
GET /v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10
|
||||
GET /storage/v1/mutable/BBBBBBBBBBBBBBBB/3
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
Range: bytes=0-16
|
||||
|
||||
200 OK
|
||||
Content-Range: bytes 0-15/16
|
||||
<complete 16 bytes of previously uploaded data>
|
||||
|
||||
#. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``::
|
||||
|
||||
PUT /v1/lease/BBBBBBBBBBBBBBBB
|
||||
{"renew-secret": "efgh", "cancel-secret": "ijkl"}
|
||||
PUT /storage/v1/lease/BBBBBBBBBBBBBBBB
|
||||
Authorization: Tahoe-LAFS nurl-swissnum
|
||||
X-Tahoe-Authorization: lease-cancel-secret efgh
|
||||
X-Tahoe-Authorization: lease-renew-secret ijkl
|
||||
|
||||
204 NO CONTENT
|
||||
|
||||
|
|
|
@ -3,9 +3,8 @@
|
|||
Release Checklist
|
||||
=================
|
||||
|
||||
These instructions were produced while making the 1.15.0 release. They
|
||||
are based on the original instructions (in old revisions in the file
|
||||
`docs/how_to_make_a_tahoe-lafs_release.org`).
|
||||
This release checklist specifies a series of checks that anyone engaged in
|
||||
releasing a version of Tahoe should follow.
|
||||
|
||||
Any contributor can do the first part of the release preparation. Only
|
||||
certain contributors can perform other parts. These are the two main
|
||||
|
@ -13,9 +12,12 @@ sections of this checklist (and could be done by different people).
|
|||
|
||||
A final section describes how to announce the release.
|
||||
|
||||
This checklist is based on the original instructions (in old revisions in the file
|
||||
`docs/how_to_make_a_tahoe-lafs_release.org`).
|
||||
|
||||
|
||||
Any Contributor
|
||||
---------------
|
||||
===============
|
||||
|
||||
Anyone who can create normal PRs should be able to complete this
|
||||
portion of the release process.
|
||||
|
@ -32,13 +34,35 @@ Tuesday if you want to get anything in").
|
|||
|
||||
- Create a ticket for the release in Trac
|
||||
- Ticket number needed in next section
|
||||
- Making first release? See `GPG Setup Instructions <gpg-setup.rst>`__ to make sure you can sign releases. [One time setup]
|
||||
|
||||
Get a clean checkout
|
||||
````````````````````
|
||||
|
||||
The release process involves compressing source files and putting them in formats
|
||||
suitable for distribution such as ``.tar.gz`` and ``zip``. Because of that, it's necessary that
|
||||
the release process begin with a clean checkout to avoid making a release with
|
||||
previously generated files.
|
||||
|
||||
- Inside the tahoe root dir run ``git clone . ../tahoe-release-x.x.x`` (where x.x.x is the release number, such as 1.16.0).
|
||||
|
||||
.. note::
|
||||
The above command creates a new directory at the same level as your original clone, named ``tahoe-release-x.x.x``. You can name this directory whatever you like, but it is good
|
||||
practice to give it the release name. You may also discard this directory once the release
|
||||
process is complete.
|
||||
|
||||
Get into the release directory and install dependencies by running:
|
||||
|
||||
- cd ../tahoe-release-x.x.x (assuming you are still in your original clone)
|
||||
- python -m venv venv
|
||||
- ./venv/bin/pip install --editable .[test]
|
||||
|
||||
|
||||
Create Branch and Apply Updates
|
||||
```````````````````````````````
|
||||
|
||||
- Create a branch for release-candidates (e.g. `XXXX.release-1.15.0.rc0`)
|
||||
- run `tox -e news` to produce a new NEWS.txt file (this does a commit)
|
||||
- Create a branch for the release/candidate (e.g. ``XXXX.release-1.16.0``)
|
||||
- run tox -e news to produce a new NEWS.txt file (this does a commit)
|
||||
- create the news for the release
|
||||
|
||||
- newsfragments/<ticket number>.minor
|
||||
|
@ -46,7 +70,7 @@ Create Branch and Apply Updates
|
|||
|
||||
- manually fix NEWS.txt
|
||||
|
||||
- proper title for latest release ("Release 1.15.0" instead of "Release ...post1432")
|
||||
- proper title for latest release ("Release 1.16.0" instead of "Release ...post1432")
|
||||
- double-check date (maybe release will be in the future)
|
||||
- spot-check the release notes (these come from the newsfragments
|
||||
files though so don't do heavy editing)
|
||||
|
@ -54,7 +78,7 @@ Create Branch and Apply Updates
|
|||
|
||||
- update "relnotes.txt"
|
||||
|
||||
- update all mentions of 1.14.0 -> 1.15.0
|
||||
- update all mentions of ``1.16.0`` to the new and higher release version, for example ``1.16.1``
|
||||
- update "previous release" statement and date
|
||||
- summarize major changes
|
||||
- commit it
|
||||
|
@ -63,14 +87,7 @@ Create Branch and Apply Updates
|
|||
|
||||
- change the value given for `version` from `OLD.post1` to `NEW.post1`
|
||||
|
||||
- update "CREDITS"
|
||||
|
||||
- are there any new contributors in this release?
|
||||
- one way: git log release-1.14.0.. | grep Author | sort | uniq
|
||||
- commit it
|
||||
|
||||
- update "docs/known_issues.rst" if appropriate
|
||||
- update "docs/Installation/install-tahoe.rst" references to the new release
|
||||
- Push the branch to github
|
||||
- Create a (draft) PR; this should trigger CI (note that github
|
||||
doesn't let you create a PR without some changes on the branch so
|
||||
|
@ -95,23 +112,33 @@ they will need to evaluate which contributors' signatures they trust.
|
|||
- (all steps above are completed)
|
||||
- sign the release
|
||||
|
||||
- git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.15.0rc0" tahoe-lafs-1.15.0rc0
|
||||
- (replace the key-id above with your own)
|
||||
- git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.16.0rc0" tahoe-lafs-1.16.0rc0
|
||||
|
||||
.. note::
|
||||
- Replace the key-id above with your own; it can simply be your email address if that address is associated with your signing key.
|
||||
- Don't forget to put the correct tag message and name. In this example, the tag message is "release Tahoe-LAFS-1.16.0rc0" and the tag name is ``tahoe-lafs-1.16.0rc0``
|
||||
|
||||
- build all code locally
|
||||
|
||||
- these should all pass:
|
||||
|
||||
- tox -e py27,codechecks,docs,integration
|
||||
- tox -e py37,codechecks,docs,integration
|
||||
|
||||
- these can fail (ideally they should not of course):
|
||||
|
||||
- tox -e deprecations,upcoming-deprecations
|
||||
|
||||
- clone to a clean, local checkout (to avoid extra files being included in the release)
|
||||
|
||||
- cd /tmp
|
||||
- git clone /home/meejah/src/tahoe-lafs
|
||||
|
||||
- build tarballs
|
||||
|
||||
- tox -e tarballs
|
||||
- confirm it at least exists:
|
||||
- ls dist/ | grep 1.15.0rc0
|
||||
- Confirm that release tarballs exist by running:
|
||||
|
||||
- ls dist/ | grep 1.16.0rc0
|
||||
|
||||
- inspect and test the tarballs
|
||||
|
||||
|
@ -120,14 +147,12 @@ they will need to evaluate which contributors' signatures they trust.
|
|||
|
||||
- when satisfied, sign the tarballs:
|
||||
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.bz2
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.gz
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.zip
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.16.0rc0-py2.py3-none-any.whl
|
||||
- gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.16.0rc0.tar.gz
|
||||
|
||||
|
||||
Privileged Contributor
|
||||
-----------------------
|
||||
======================
|
||||
|
||||
Steps in this portion require special access to keys or
|
||||
infrastructure. For example, **access to tahoe-lafs.org** to upload
|
||||
|
@ -155,14 +180,20 @@ need to be uploaded to https://tahoe-lafs.org in `~source/downloads`
|
|||
|
||||
- secure-copy all release artifacts to the download area on the
|
||||
tahoe-lafs.org host machine. `~source/downloads` on there maps to
|
||||
https://tahoe-lafs.org/downloads/ on the Web.
|
||||
- scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads
|
||||
https://tahoe-lafs.org/downloads/ on the Web:
|
||||
|
||||
- scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads
|
||||
|
||||
- the following developers have access to do this:
|
||||
|
||||
- exarkun
|
||||
- meejah
|
||||
- warner
|
||||
|
||||
Push the signed tag to the main repository:
|
||||
|
||||
- git push origin tahoe-lafs-1.17.1
|
||||
|
||||
For the actual release, the tarball and signature files need to be
|
||||
uploaded to PyPI as well.
|
||||
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
sphinx
|
||||
docutils<0.18 # https://github.com/sphinx-doc/sphinx/issues/9788
|
||||
recommonmark
|
||||
sphinx_rtd_theme
|
|
@ -124,6 +124,35 @@ Tahoe-LAFS.
|
|||
.. _magic wormhole: https://magic-wormhole.io/
|
||||
|
||||
|
||||
Multiple Instances
|
||||
------------------
|
||||
|
||||
Running multiple instances against the same configuration directory isn't supported.
|
||||
This will lead to undefined behavior and could corrupt the configuration or state.
|
||||
|
||||
We attempt to avoid this situation with a "pidfile"-style file in the config directory called ``running.process``.
|
||||
There may be a parallel file called ``running.process.lock`` in existence.
|
||||
|
||||
The ``.lock`` file exists to make sure only one process modifies ``running.process`` at once.
|
||||
The lock file is managed by the `lockfile <https://pypi.org/project/lockfile/>`_ library.
|
||||
If you wish to make use of ``running.process`` for any reason you should also lock it and follow the semantics of lockfile.
|
||||
|
||||
If ``running.process`` exists then it contains the PID and the creation-time of the process.
|
||||
When no such file exists, there is no other process running on this configuration.
|
||||
If there is a ``running.process`` file, it may be a leftover file or it may indicate that another process is running against this config.
|
||||
To tell the difference, determine if the PID in the file exists currently.
|
||||
If it does, check the creation-time of the process versus the one in the file.
|
||||
If these match, there is another process currently running and using this config.
|
||||
Otherwise, the file is stale -- it should be removed before starting Tahoe-LAFS.
|
||||
|
||||
Some example Python code to check the above situations:
|
||||
|
||||
.. literalinclude:: check_running.py
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
A note about small grids
|
||||
------------------------
|
||||
|
||||
|
|
|
@ -7,6 +7,8 @@ These are not to be confused with the URI-like capabilities Tahoe-LAFS uses to r
|
|||
An attempt is also made to outline the rationale for certain choices about these URLs.
|
||||
The intended audience for this document is Tahoe-LAFS maintainers and other developers interested in interoperating with Tahoe-LAFS or these URLs.
|
||||
|
||||
.. _furls:
|
||||
|
||||
Background
|
||||
----------
|
||||
|
||||
|
@ -31,6 +33,8 @@ The client's use of the swissnum is what allows the server to authorize the clie
|
|||
|
||||
.. _`swiss number`: http://wiki.erights.org/wiki/Swiss_number
|
||||
|
||||
.. _NURLs:
|
||||
|
||||
NURLs
|
||||
-----
|
||||
|
||||
|
@ -47,27 +51,27 @@ This can be considered to expand to "**N**\ ew URLs" or "Authe\ **N**\ ticating
|
|||
The anticipated use for a **NURL** will still be to establish a TLS connection to a peer.
|
||||
The protocol run over that TLS connection could be Foolscap though it is more likely to be an HTTP-based protocol (such as GBS).
|
||||
|
||||
Unlike fURLs, only a single net-loc is included, for consistency with other forms of URLs.
|
||||
As a result, multiple NURLs may be available for a single server.
|
||||
|
||||
Syntax
|
||||
------
|
||||
|
||||
The EBNF for a NURL is as follows::
|
||||
|
||||
nurl = scheme, hash, "@", net-loc-list, "/", swiss-number, [ version1 ]
|
||||
|
||||
scheme = "pb://"
|
||||
nurl = tcp-nurl | tor-nurl | i2p-nurl
|
||||
tcp-nurl = "pb://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ]
|
||||
tor-nurl = "pb+tor://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ]
|
||||
i2p-nurl = "pb+i2p://", hash, "@", i2p-loc, "/", swiss-number, [ version1 ]
|
||||
|
||||
hash = unreserved
|
||||
|
||||
net-loc-list = net-loc, [ { ",", net-loc } ]
|
||||
net-loc = tcp-loc | tor-loc | i2p-loc
|
||||
|
||||
tcp-loc = [ "tcp:" ], hostname, [ ":" port ]
|
||||
tor-loc = "tor:", hostname, [ ":" port ]
|
||||
i2p-loc = "i2p:", i2p-addr, [ ":" port ]
|
||||
|
||||
i2p-addr = { unreserved }, ".i2p"
|
||||
tcp-loc = hostname, [ ":" port ]
|
||||
hostname = domain | IPv4address | IPv6address
|
||||
|
||||
i2p-loc = i2p-addr, [ ":" port ]
|
||||
i2p-addr = { unreserved }, ".i2p"
|
||||
|
||||
swiss-number = segment
|
||||
|
||||
version1 = "#v=1"
|
||||
|
@ -87,11 +91,13 @@ These differences are separated into distinct versions.
|
|||
Version 0
|
||||
---------
|
||||
|
||||
A Foolscap fURL is considered the canonical definition of a version 0 NURL.
|
||||
In theory, a Foolscap fURL with a single netloc is considered the canonical definition of a version 0 NURL.
|
||||
Notably,
|
||||
the hash component is defined as the base32-encoded SHA1 hash of the DER form of an x509v3 certificate.
|
||||
A version 0 NURL is identified by the absence of the ``v=1`` fragment.
|
||||
|
||||
In practice, real-world fURLs may have more than one netloc, so the lack of a version fragment will likely just involve dispatching the fURL to a different parser.
|
||||
|
||||
Examples
|
||||
~~~~~~~~
|
||||
|
||||
|
@ -103,11 +109,8 @@ Version 1
|
|||
|
||||
The hash component of a version 1 NURL differs in three ways from the prior version.
|
||||
|
||||
1. The hash function used is SHA3-224 instead of SHA1.
|
||||
The security of SHA1 `continues to be eroded`_.
|
||||
Contrariwise SHA3 is currently the most recent addition to the SHA family by NIST.
|
||||
The 224 bit instance is chosen to keep the output short and because it offers greater collision resistance than SHA1 was thought to offer even at its inception
|
||||
(prior to security research showing actual collision resistance is lower).
|
||||
1. The hash function used is SHA-256, to match RFC 7469.
|
||||
The security of SHA1 `continues to be eroded`_ and Latacora recommends `SHA-2`_; a sketch of the resulting hash computation follows this list.
|
||||
2. The hash is computed over the certificate's SPKI instead of the whole certificate.
|
||||
This allows certificate re-generation so long as the public key remains the same.
|
||||
This is useful to allow contact information to be updated or extension of validity period.
|
||||
|
@ -122,7 +125,7 @@ The hash component of a version 1 NURL differs in three ways from the prior vers
|
|||
*all* certificate fields should be considered within the context of the relationship identified by the SPKI hash.
|
||||
|
||||
3. The hash is encoded using urlsafe-base64 (without padding) instead of base32.
|
||||
This provides a more compact representation and minimizes the usability impacts of switching from a 160 bit hash to a 224 bit hash.
|
||||
This provides a more compact representation and minimizes the usability impacts of switching from a 160 bit hash to a 256 bit hash.
|
||||
|
||||
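A sketch of computing the version 1 hash from a server certificate, following the three points above and assuming the ``cryptography`` package::

    import base64
    import hashlib

    from cryptography import x509
    from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

    def nurl_hash_v1(certificate_der: bytes) -> str:
        cert = x509.load_der_x509_certificate(certificate_der)
        # Hash only the SubjectPublicKeyInfo so the certificate can later be
        # regenerated without changing the NURL.
        spki = cert.public_key().public_bytes(
            Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
        digest = hashlib.sha256(spki).digest()
        # urlsafe-base64, with the trailing padding stripped.
        return base64.urlsafe_b64encode(digest).decode("ascii").rstrip("=")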
A version 1 NURL is identified by the presence of the ``v=1`` fragment.
|
||||
Though the length of the hash string (38 bytes) could also be used to differentiate it from a version 0 NURL,
|
||||
|
@ -140,7 +143,8 @@ Examples
|
|||
* ``pb://azEu8vlRpnEeYm0DySQDeNY3Z2iJXHC_bsbaAw@localhost:47877/64i4aokv4ej#v=1``
|
||||
|
||||
.. _`continues to be eroded`: https://en.wikipedia.org/wiki/SHA-1#Cryptanalysis_and_validation
|
||||
.. _`explored by the web community`: https://www.imperialviolet.org/2011/05/04/pinning.html
|
||||
.. _`SHA-2`: https://latacora.micro.blog/2018/04/03/cryptographic-right-answers.html
|
||||
.. _`explored by the web community`: https://www.rfc-editor.org/rfc/rfc7469
|
||||
.. _Foolscap: https://github.com/warner/foolscap
|
||||
|
||||
.. [1] ``foolscap.furl.decode_furl`` is taken as the canonical definition of the syntax of a fURL.
|
||||
|
|
|
@ -264,3 +264,18 @@ the "tahoe-conf" file for notes about configuration and installing these
|
|||
plugins into a Munin environment.
|
||||
|
||||
.. _Munin: http://munin-monitoring.org/
|
||||
|
||||
|
||||
Scraping Stats Values in OpenMetrics Format
|
||||
===========================================
|
||||
|
||||
Time Series DataBase (TSDB) software like Prometheus_ and VictoriaMetrics_ can
|
||||
parse statistics in OpenMetrics_ format from, e.g., the http://localhost:3456/statistics?t=openmetrics
|
||||
URL. Software like Grafana_ can then be used to graph
|
||||
and alert on these numbers. You can find a pre-configured dashboard for
|
||||
Grafana at https://grafana.com/grafana/dashboards/16894-tahoe-lafs/.
|
||||
|
||||
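The same endpoint can also be read ad hoc; a minimal sketch, assuming the third-party ``requests`` and ``prometheus_client`` packages (neither is a Tahoe-LAFS requirement)::

    import requests
    from prometheus_client.openmetrics.parser import text_string_to_metric_families

    # The stats port and the ?t=openmetrics query are taken from the URL above.
    body = requests.get("http://localhost:3456/statistics?t=openmetrics").text
    for family in text_string_to_metric_families(body):
        for sample in family.samples:
            print(sample.name, sample.labels, sample.value)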
.. _OpenMetrics: https://openmetrics.io/
|
||||
.. _Prometheus: https://prometheus.io/
|
||||
.. _VictoriaMetrics: https://victoriametrics.com/
|
||||
.. _Grafana: https://grafana.com/
|
||||
|
|
|
@ -353,10 +353,23 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer,
|
|||
nodes.append(process)
|
||||
return nodes
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def alice_sftp_client_key_path(temp_dir):
|
||||
# The client SSH key path is typically going to be somewhere else (~/.ssh,
|
||||
# typically), but for convenience sake for testing we'll put it inside node.
|
||||
return join(temp_dir, "alice", "private", "ssh_client_rsa_key")
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:alice", include_args=[], include_result=False)
|
||||
def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||
def alice(
|
||||
reactor,
|
||||
temp_dir,
|
||||
introducer_furl,
|
||||
flog_gatherer,
|
||||
storage_nodes,
|
||||
alice_sftp_client_key_path,
|
||||
request,
|
||||
):
|
||||
process = pytest_twisted.blockon(
|
||||
_create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
|
||||
|
@ -387,19 +400,13 @@ accounts.file = {accounts_path}
|
|||
""".format(ssh_key_path=host_ssh_key_path, accounts_path=accounts_path))
|
||||
generate_ssh_key(host_ssh_key_path)
|
||||
|
||||
# 3. Add a SFTP access file with username/password and SSH key auth.
|
||||
|
||||
# The client SSH key path is typically going to be somewhere else (~/.ssh,
|
||||
# typically), but for convenience sake for testing we'll put it inside node.
|
||||
client_ssh_key_path = join(process.node_dir, "private", "ssh_client_rsa_key")
|
||||
generate_ssh_key(client_ssh_key_path)
|
||||
# 3. Add an SFTP access file with an SSH key for auth.
|
||||
generate_ssh_key(alice_sftp_client_key_path)
|
||||
# Pub key format is "ssh-rsa <thekey> <username>". We want the key.
|
||||
ssh_public_key = open(client_ssh_key_path + ".pub").read().strip().split()[1]
|
||||
ssh_public_key = open(alice_sftp_client_key_path + ".pub").read().strip().split()[1]
|
||||
with open(accounts_path, "w") as f:
|
||||
f.write("""\
|
||||
alice password {rwcap}
|
||||
|
||||
alice2 ssh-rsa {ssh_public_key} {rwcap}
|
||||
alice-key ssh-rsa {ssh_public_key} {rwcap}
|
||||
""".format(rwcap=rwcap, ssh_public_key=ssh_public_key))
|
||||
|
||||
# 4. Restart the node with new SFTP config.
|
||||
|
@ -455,10 +462,8 @@ def chutney(reactor, temp_dir):
|
|||
)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
|
||||
# XXX: Here we reset Chutney to the last revision known to work
|
||||
# with Python 2, as a workaround for Chutney moving to Python 3.
|
||||
# When this is no longer necessary, we will have to drop this and
|
||||
# add '--depth=1' back to the above 'git clone' subprocess.
|
||||
# XXX: Here we reset Chutney to a specific revision known to work,
|
||||
# since there are no stability guarantees or releases yet.
|
||||
proto = _DumpOutputProtocol(None)
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
|
@ -466,7 +471,7 @@ def chutney(reactor, temp_dir):
|
|||
(
|
||||
'git', '-C', chutney_dir,
|
||||
'reset', '--hard',
|
||||
'99bd06c7554b9113af8c0877b6eca4ceb95dcbaa'
|
||||
'c825cba0bcd813c644c6ac069deeb7347d3200ee'
|
||||
),
|
||||
env=environ,
|
||||
)
|
||||
|
|
|
@ -1,794 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
|
||||
set -euxo pipefail
|
||||
|
||||
CODENAME=$(lsb_release --short --codename)
|
||||
|
||||
if [ "$(id -u)" != "0" ]; then
|
||||
SUDO="sudo"
|
||||
else
|
||||
SUDO=""
|
||||
fi
|
||||
|
||||
# Script to install Tor
|
||||
echo "deb http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list
|
||||
echo "deb-src http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list
|
||||
|
||||
# # Install Tor repo signing key
|
||||
${SUDO} apt-key add - <<EOF
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mQENBEqg7GsBCACsef8koRT8UyZxiv1Irke5nVpte54TDtTl1za1tOKfthmHbs2I
|
||||
4DHWG3qrwGayw+6yb5mMFe0h9Ap9IbilA5a1IdRsdDgViyQQ3kvdfoavFHRxvGON
|
||||
tknIyk5Goa36GMBl84gQceRs/4Zx3kxqCV+JYXE9CmdkpkVrh2K3j5+ysDWfD/kO
|
||||
dTzwu3WHaAwL8d5MJAGQn2i6bTw4UHytrYemS1DdG/0EThCCyAnPmmb8iBkZlSW8
|
||||
6MzVqTrN37yvYWTXk6MwKH50twaX5hzZAlSh9eqRjZLq51DDomO7EumXP90rS5mT
|
||||
QrS+wiYfGQttoZfbh3wl5ZjejgEjx+qrnOH7ABEBAAG0JmRlYi50b3Jwcm9qZWN0
|
||||
Lm9yZyBhcmNoaXZlIHNpZ25pbmcga2V5iQE8BBMBAgAmBQJKoOxrAhsDBQkJZgGA
|
||||
BgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQ7oy8noht3YmVUAgApMyyFaBxvie1
|
||||
/jAMoQ3uZLjnrP/SWK9Sv9TIiiJxig4PLSNn+dlu1EZicFoZaGx+wLMhOOuCoLKA
|
||||
Vfo3RSF2WgvBePkxqN03hILPAVuT2kus+7f7y926lkRy2mF+eWVd5CZDoHERABFt
|
||||
gX0Zf24TBz90Cza1tu+1OWiYgD7zi24AIlFwcU4Up9+ejZWGSG4J3yOZj5xkEAxg
|
||||
5RDKfkbsRVV+ZnqaxcDqe+Gpu4BFEiNv1r/OyZIA8FbWEjn0rnXDA4ynOsown9pa
|
||||
QE0NrMIHrh6fR9+CUyeFzn+xFhPaNho7k8GAzC02WctTGX5lZRBaLt7MDC1i6eaj
|
||||
VcC1eXgtPYhMBBMRAgAMBQJKoO50BYMJZf93AAoJEN56r26UwJx/hiQAoMT5EmxK
|
||||
flkAi2UywT99PuQGp3ckAJ4jJubPJNnHFeCNZ6/TtKmHoziU4okBPAQTAQIAJgIb
|
||||
AwYLCQgHAwIEFQIIAwQWAgMBAh4BAheABQJQPjNuBQkNIhUAAAoJEO6MvJ6Ibd2J
|
||||
GbAH/2fjtebQ7xsC8zUTnjIk8jmeH8kNZcp1KTkt31CZd6jN9KFj5dbSuaXQGYMJ
|
||||
Xi9AqPHdux79eM6QjsMCN4bYJe3bA/CEueuL9bBxsfl9any8yJ8BcSJVcc61W4VD
|
||||
Xi0iogSeqsHGagCHqXkti7/pd5RCzr42x0OG8eQ6qFWZ9LlKpLIdz5MjfQ7uJWdl
|
||||
hok5taSFg8WPJCSIMaQxRC93uYv3CEMusLH3hNjcNk9KqMZ/rFkr8AVIo7X6tCuN
|
||||
cOI6RLJ5o4mUNJflU8HKBpRf6ELhJAFfhV0Ai8Numtmj1F4s7bZTyDSfCYjc5evI
|
||||
/BWjJ6pGhQMyX32zPA9VDmVXZp2IRgQQEQIABgUCSqqiMgAKCRDrWolqKJiL9aY6
|
||||
AJ9PJ/c0nvAdMFyTAB4TgxK3lm1dWwCfRcOrw9ZaeTicrpOV6+or9WhYi0WIRgQQ
|
||||
EQIABgUCSqxgNQAKCRA7nQk/MbCXS+gnAJwJKiSIlI1j7IivecE838smV1vF6QCb
|
||||
B9TrQZ5pYXDPuGrBUUvbfF5OnKeIRgQQEQIABgUCS32d2AAKCRBiFZZPWxcqsjlM
|
||||
AJ9wE9uxo8DUBRVVdc+/Qp5YViBVogCgyvePB3U1hUPpN7cP7ImEbPMIPo+IRgQQ
|
||||
EQIABgUCS36WLwAKCRBOUwAZoaG8BTgXAJ9fcfgaCb/HTIgC+a3gJbwA/0XkPwCg
|
||||
pqm7BuOwadxPdR00WIeaKcBqrW2IRgQQEQIABgUCTLqaOwAKCRCF9yYxJ6HImkv6
|
||||
AKCDGzgLmj47OeTtaYWs9DeVud8MogCeLinbRpG7DHpBYYfyGiWPNNKabkWIXgQQ
|
||||
EQgABgUCTMEPxgAKCRBrN4EsW1TWjEMIAP4pRDudJEmpk5jQIjqcAPu1qT0rsmWT
|
||||
Q5ElxPeLpkPIPwD/fdoFfMzDSSdNN0noO595BgFMwr4I1cz6GSsd8GCA8NeIXgQQ
|
||||
EQgABgUCTgyF3gAKCRCDojkL/aKKGg2vAQDM6swxNsKGjw6wb+0PGCeXBj3H+QEi
|
||||
oJ8J0outkIyT1QD+L5gYFAIeDUxpnNmt9tJ6gTv+rJk5gNjOrvz7QTXpYtmInAQQ
|
||||
AQIABgUCTNR85QAKCRDjsV6KbxD8QmnaA/90V7ITTZGfdbvbe7/usuyzr26e59gt
|
||||
HmsRdSxJn7zG3vng+tMjjDTapwY4vTk/5s7BshlGFT2Vw1kl61VhC0vf+wFUAgGh
|
||||
lV7cH7DQyJNaBFdxJ0nz0XJ+gbKjDN2gA7tK5VbAD8j8M/sJG6m8cLmFml59+v+e
|
||||
Yo4VA/Xfl5qRYIkBHAQQAQIABgUCTJFqpgAKCRBjkJvie11mayZaCADGDPzdfisD
|
||||
nVPK68hnJ7vx1uCgdkMKyAJmNXca0twIiYl1oKmZ961h1Y5qUJOj02AjtxKgeI+b
|
||||
1hRwGAxQ4uS8bHtYj6Pn/mXK0Q3G1lw0Q6M+q4mDdj3zLAeHR/WoyCQTFWX2gmgd
|
||||
46C0XQkqpfY9mmfPZxpKoilMxlhX4z6TxxYRiwbxZOB/jwhZMCNMoXx5SYDC3Aco
|
||||
RqXWd3wCwwy5lsv8XC33cQd/c+XbJIC4hzu4lTj3ndDlptpJp9SPNSUiNe8YD0sw
|
||||
SITX+R1uzO4l6LLavw02j/MAhfVi3dEpf8lt4ZKaRVUHB/XPTsUmxYx7jOtDyr+w
|
||||
/lPnZFMhAECDiQEcBBMBAgAGBQJM4UTLAAoJEE7GByMpYG5327oIAMDOuVYbMiL9
|
||||
anx0+sRuEEQZbY1otCoTCIf8rDEBAw0RBPYuXOfcMkHWNPzfoohW6qAjeEK831AS
|
||||
PVg3cta5Ctmn/mM2ehO3Y+XCEtenTZJP8ZtHg3pZEt4PtQaOBtrWxqX1h633KEIa
|
||||
0a7dASaU4KOZg/SyKoChcSr2pY+jtzDacsZ8q/et+zz2gktdvcDSkJurkPjlORx9
|
||||
CcWFhOd7PFP4ZWn0A0AkufMpbLXhlVJCmSykyyG0Don3C9i7sG045303KNy6CA+l
|
||||
jvcm/EBeeMWvLMdjr51XmkGFjaAs4Lyw0CfKj9uNZdriOtSVtH2kcMmNSvcUln2B
|
||||
FZTBo2NeRKGJAhwEEAECAAYFAktpE+EACgkQxel8K2OfamZhpg/+P9NPk88rqRnE
|
||||
uDVDHodlkA5hG0d0Yi5vkV9rw07yjYut474aUd3FjJFqNEoiW+6dFbNy6YqqYPhr
|
||||
XLtnfJl5LAUJUzMA2aSLtbuX+cq18DCv5ZmU4DW6kZOWi5vX7QkQCTTLP03VlcD3
|
||||
Gu6HyofseBMgE4zoEXdmZSZmPnOygakFLzC9w+D1XfK2gcaTKjAJJdW80aY56eUe
|
||||
zFDKLhOw+YzIK1/ZeeOTS4LeITtTq5J6/hnwHrJdjApX80v2WJzVVoy7lQbxAPsl
|
||||
JHZdYVFCBy2Tyk7kYdddVxYCcdYr0e8A+GfG/tQJGxvZ3O4nOrezSv0XmlhLZ5rj
|
||||
Cn8M6fg/NKUXsPtXiac+DQJbr5RwQ5Sc7bnPVsCywqetOeA+xv3L2wi94rg4u97Q
|
||||
iwqhDW0SE9zZuQL5vaXl/GFpaRXs+mVGATS9h+0lDBQPi21oPkdN/BKKzr//2GCl
|
||||
5VFb+rkOY65HthCuiIrT8jFGArJIF4nXku/4BPpNrganC89iTsd5+UUNFIlta+WY
|
||||
kENQ9tC2mwj96BaK0KyRQZP9AAzTo5wG8aouczptpwSH0aECJNy8kd/UR8IAkZkx
|
||||
jY4+zyfQDlb4aNDsVGvempgjFcNo0rciKrPQl5GyRLQj2azuv46gaGcYzqsobejS
|
||||
/2jqJLMnkTeExaCryrWuXo/raWBWQLOJAhwEEAECAAYFAkybgq4ACgkQ2HRyfjOa
|
||||
f6huKQ//Yfey5BJXqZqIt9i6tyw2VqzMtZ1gAqFdEKeuSmz30xty9g6KknIjpeZo
|
||||
+POb3rQFUKGZ/q4AjWKdD9C5WUvLcXd0RCWeDG7dmD78h35OWwqhc+8FXO1vU0nG
|
||||
yFdEx89cNiO42M/z+eYeoysgVL3ixbCjJlrN4MHrilqshxH5MvG7JfIfoPwucQyt
|
||||
NcwSa8T9kTlmC9uSl1rwEllKlDNabxMpsf+9T0kZtI+KQrvMBg8A4RRJhpP13Bt6
|
||||
y949FbR4zva7kqV24h+5c/bKsgY4PXXM+AnIuXy+Dq1aRVgRLhWypJqc73UnpD/M
|
||||
DDOPKX8nkF3F0mjcfEso6KtvNsniPCr5GKcnvoGu38qlQ7ILm2Pv0tjBHNIYQNG9
|
||||
xPn2TMH74D6f88NahHj33Ha7PG8Jn/dZMuKg7qEeHit7+lJDn18cTT8xIMMUpl9A
|
||||
pmjLuWwo5eTXysai7PQQU/ezEbOgYqznBKEFK+CXH6KINnGH13d/r9L71AZj/KZs
|
||||
I+c7E0imLwUStvJEZr2M9nR+ybA4SN6/kwcF5n2kx+lBJjqBn72hb0wyaXXtTYFG
|
||||
deruYIGsxEx8imbIBDtX6rWOMIrZAHlPBS5NTj4Hye14XcChR/AodmXrgJD/z+8+
|
||||
sDGGZpHAc291wknHO++j22vF47Q2VSt8T+WM6Tx8vq0+Wsnui/iJARwEEAECAAYF
|
||||
Ak6DrGQACgkQ/YT8uPW0MEdizQf+LRGpkyYcVnEXiFUUuJiMZlWSoTeFsFlTLdBV
|
||||
jxAlcTanW5PUZ1O+fzxhSTjtAgEZm1UJUv3RaJxGlMeOVV+1o6F7xzsaTOFajjAK
|
||||
DwrfP9WdvRyiC5IrvdfuJB6THCkgu5l0yoMxANyBXi9lEPHFPllOk6sTjfEk9LlJ
|
||||
Tn1Quy3c5qb9GJgiSbA+7sS6AO7woE52TxdAJjxB+PM1dt/FZGG4hjeH3WmjUtfa
|
||||
hm1UlBtWLEVleOz4EFXwTQErNpHfBaReJecOfJZ/30OGEJNWkNkmrg+ed1uLsE+K
|
||||
2DxEHTFCZd83OPQGHpi+qYcv9SDDMYxzzdlynkOn5DoR0z87N4kBnAQRAQoABgUC
|
||||
TqmiPwAKCRCg8hPxRutYH4lKC/9YYwjHjABrogdB2sb49JIiM2Dqe+G++GizVTZs
|
||||
mV26PJXWQLKr2zKZDMLk3l/b9YLVkuFeG2K035HPFCtpWIlxkxpbarI5i9F0NjMm
|
||||
gaIyqvh14xNhDS6NHgioDdNKvdNI5LYtWXGREjYJVCBIwdxWZHi5JsQgV2E0vfIZ
|
||||
GDKWFfMIF2xrt6x0uvhWZnD94ecU0Dd8sFz7TKJoCdzfdYpoj5ROenLGJ7OcDMUL
|
||||
knSA4NEVIEY0BVyQCb3TCjfboCRxRdXs+6yz4YEqTCzPNvQqIKKO6MA/X3ytmUok
|
||||
RZIVmU8es4iZxYUXrHKeMzrvYVpbwwHwpziGwBr+SOkrS5iv5c1V1Nb+pSajtzAm
|
||||
4tQnNoyjvB2YsEOvTLUNgaScY5O7Xu/FGhI6E9Y8KbD7nb2t9XdtEFgHiq1ST15t
|
||||
iew6YNCatVA/GW3r97ediBjqAX35hqFSZ05yaNDlCgfKxrRiv2SHu+hutAX7cVLT
|
||||
Aetm2mrJBb0ip7hQKrmUOpziT7iIXgQQEQoABgUCUVVRWQAKCRCHWDJ6EJ8lkdti
|
||||
AQCDqrwsq6QrE1puqjai8cGvIUdY5UWiBVj6IjrTmvAdlAD/WEqresRrwQdoPJ6x
|
||||
4VKJyJByQPCuJvlfl6nzpnBg2LyJARwEEAECAAYFAlEuf78ACgkQdxZ3RMno5CjA
|
||||
8Qf+LM8nZhjvJyGdngan05EKqwc5HAppi34pctNpSreJvNxSBXQ4vydVckvdAJNI
|
||||
ttGeWjVDr6Z61w6+h9rMoUwZkKMLU5wii5qJkvwGtPw5JZVe6ecEKJrr/p9tkMjI
|
||||
jTHeneYrm+zGJAx/F8eCy+CzWwGacLw1w68IHHH6zsJZRhyNlSBc9ZJANRzXRPWc
|
||||
0tzHfT7HtiN2dQK2OlFLRr+4t9KLFae0MsNRr4M6nBtOX+CBP4OdKTbeASyXnK8G
|
||||
bpnpEjn0b4isr6eoMcJbNwVBX4XnI5RG/Ugur4es9ktOQkUFxy8Zpp8/vk/+hyWH
|
||||
unr1G2ema2dak8zHIa7G2T8Bb4kCGwQQAQIABgUCUVSNVAAKCRB+fTNcWi1ewX4x
|
||||
D/d0R2OHFLo42KJPsIc9Wz3AMO7mfpbCmSXcxoM+Cyd9/GT2qgAt9hgItv3iqg9d
|
||||
j+AbjPNUKfpGG4Q4D/x/tb018C3F4U1PLC/PQ2lYX0csvuv3Gp5MuNpCuHS5bW4k
|
||||
LyOpRZh1JrqniL8K1Mp8cdBhMf6H+ZckQuXShGHwOhGyBMu3X7biXikSvdgQmbDQ
|
||||
MtaDbxuYZ+JGXF0uacPVnlAUwW1F55IIhmUHIV7t+poYo/8M0HJ/lB9y5auamrJT
|
||||
4acsPWS+fYHAjfGfpSE7T7QWuiIKJ2EmpVa5hpGhzII9ahF0wtHTKkF7d7RYV1p1
|
||||
UUA5nu8QFTope8fyERJDZg88ICt+TpXJ7+PJ9THcXgNI+papKy2wKHPfly6B+071
|
||||
BA4n0UX0tV7zqWk9axoN+nyUL97/k572kLTbxahrBEYXphdNeqqXHa/udWpTYaKw
|
||||
SGYmIohTSIqBZh7Xa/rhLsx2UfgR5B0WW34E8cTzuiZziYalIC/9694vjOtPaSTp
|
||||
iPyK2Bn/gOF6zXEqtUYPTdVfYADyhD00uNAxAsmgmju+KkoYl6j4oG3a71LZWcdQ
|
||||
+hx3n+TgpNx51hXlqdv8g1HmkGM5KJW31ZgxfPmqgO6JfUiWucRaGHNjA2AdinU+
|
||||
pFq9rlIaHWaxG+xw+tFNtdTDxmmzaj2pCsYUz/qTAN31iQIcBBABAgAGBQJNGJ3w
|
||||
AAoJEIO1uBYaG9UOMXcP/0kA1SRdYd24ORdRdkVyhI8QqBE49+seV3iElKsk6e54
|
||||
auaQDhpSFXfCLbSY2tmEnxD2AWDVwUDHtBPuKXREr8ytB44MKVm5Ar7M1o/ner+R
|
||||
JsMdYR1bxLxF4j5MuPgTLaZKEszxmI5C+eo8wvf5heFwtIq23HxO+7DtYO2XKWLj
|
||||
/k7Q3K760YvLtO72awqfMXr+MxX57/L6qyWdiMNfNiT1uGv9BpixRGB6xbDN18un
|
||||
pVKk3sLPcE3oc44UdkSuxVrqHXVMzUIxpQGqOf+KYk9s5Z0KijllK09uoZI3WyKO
|
||||
R2I5iGJDuBBzbuMGP23Gr3IMRTmVNAEWmjpxgLC2j1t80ocaAkguejTAKTjjXH1M
|
||||
WJHoESsBXKdbk2xuAvnvqQqZ7weZfLCBS4XoSGdg3teeGa/ZQOHDknrLurqaa2ah
|
||||
FGxcG4lOrf0OBZWMaI9Kj3HnrcThmEOwIozL4SDmUvvQxyK5s3uZjphFAyxRhQx1
|
||||
fCKhnyA+D8oVtnTZ9uxtUWstIKK5RlOCxWJH3obvEGmGi+6E+zgDsK+ivqM8gFjj
|
||||
3XmMpO6dh3/yZ6B8b8kanj4cYlCHhpeJ7v16G+FvGh/aMBlCopXAvoTprxQgXa12
|
||||
MgYzYGRyuviOV+PWo+RTTPRyYmJ9RLADKSdHwA8VUvHp+nxZucES1M9PxVq92hhW
|
||||
iQIcBBABAgAGBQJQezFyAAoJEFOcQ2uC5Av326UQALBzrx914us/lT+hEnfz5aRD
|
||||
E7TwOhrt2ymPVzLvreRcaXOnbvG9eVz3FYwSQtl4UbprP6wjdi9bourU9ljNBEuy
|
||||
OAwoM0MwMwHnFHeDrmVFbgop3SkKzn8JHGzaEM+Tq6WKHYTXY3/KrCBdOy1sQPNe
|
||||
ZoF7/rq4Z20CcrQaKdd0T7nAEy7TLQIXEnKCQKa2j+E55i584dIshxVWvNuwsfeZ
|
||||
649f2FTGM3hEg527BZ4eLQhZQLHkjIY+0w0EB9f4AhViZfutakQf5uqV9oRlgmHm
|
||||
QsN5vMKryC1G15HO9HPSMJf9mvtJm7U+ySNE354wt2Q2CwX1NdDLa8UUzlpGgR6c
|
||||
d4PmAyVrykEWdtk/4ADic+tu4pTJVx92ssgiBAQoi/GMp61KPcxXU9O4flg0HDYj
|
||||
erGuCau/5iUKWaLL9VBe3YdznoQBCzwquTs3TT1toXHjiujGFo5arl5elPv4eNfU
|
||||
/S0Yf3aguYbwj2vVrDbp3JxYjJouxklxQ2J4jOXD1cehjZ+xFRfdnyUDV2o9FzvW
|
||||
Cc3N04var7Wx8+0mtok0N0xTkJunN8rkxvVUuh32zJlFlvZX4u61ZY4wI3hPz072
|
||||
AFBdqv+B645Hrk04Hbu93iZ5ZgcICNZppyd6xZeBvqaEZXS+Zv92HCbxIBS9P7zB
|
||||
3sXmQT57jusVSUdQtfJwiQIcBBABAgAGBQJRcGlBAAoJELlvIwCtEcvuoWwP/ReL
|
||||
zhFKWlc/F35MvNyO1usz+qvs+SrlAtwaNcv3Dd9ih0mw+bH+U+PVVgXlk1g0NY9h
|
||||
NNRLxt2mUc+mg9ttN+ha0RkqUYsYjg1Wj9bDuR0a+3DhtuS9hhEjWrBBT3UbTcWT
|
||||
5lxKkUgy4Sj+Dh0N78spHo2orUN3qRw3VkHY4hWcxAvlXreuEv6J7Ik4uZ+8MMgJ
|
||||
Fld4oVhMmnWOrMwt10D58URvZsGypI+dK0p2JSue5yfBWkSMpFsJ8z2cCOBMAPQq
|
||||
9S63mhXZiORrxJS4pzJ87wcYG/H3R1pqF6I/49tWBlyZwiwOYs0fFEJc9idF/hSz
|
||||
en/qDDQpvy4gNF48if7SGEtOBu1vEGqWKvNsataNcjYgj4BZhDlMHgAxWn0G7VNR
|
||||
Vsx1D6nzOzEAlFa/PQgQfCXScJXRV72uKoMk2uuOk8yb2+toOW5LoS/0UbsnUi77
|
||||
VvknpZPbQPQ5svsGBCU1BQpDeFsQk4IMW5Flv1VVSEtxnfLi89An4HPMN92+qNUD
|
||||
RM3E/eLkFnrPdiB3yMkjAgDbao5Gh+CTszQ118xkhmRC+pNCI75AS/X4V1WrcAJU
|
||||
niTbFgBRZr4t2tWfLMgx44XMtVrKraROj7QH4rEODSInBBEWT2hiJeWm4QS1g5Rf
|
||||
oym4ur02xxqhwXAsCXFGFKZirXDoTMHDds6dI0QXiQEcBBABAgAGBQJQSx6AAAoJ
|
||||
EH+pHtoamZ2Ehb0IAJzD7va1uonOpQiUuIRmUpoyYQ0EXOa+jlWpO8DQ/RPORPM1
|
||||
IEGIsDZ3kTx6UJ+Zha1TAisQJzuLqAeNRaRUo0Tt3elIUgI+oDNKRWGEpc4Z8/Rv
|
||||
4s6zBnPBkDwCEslAeFj3fnbLSR+9fHF0eD/u1Pj7uPyM23kiwWSnG4KQCyZhHPKR
|
||||
jhmBg1UhEA25fOr8p9yHuMqTjadMbp3+S8lBI3MZBXOKl2JUPRIZFe6rXqx+SVJj
|
||||
RW6cXMGHhe6QQGISzQBeBobqQnSim08sr18jvhleKqegGZVs1YhadZQzmQBNJXNT
|
||||
/YmVX9cyrpktkHAPGRQ8NyjRSPwkRZAqaBnB71CJAfAEEAEKAAYFAlKGBO0ACgkQ
|
||||
N4Uj/AufSZbFOQ6fbHEEerx0zf6FtLG2/EyK00q95yQY363WfM6fXvEbEHe8RThP
|
||||
oZswxLAn96yfTNWXLhDS64muDntsPPpenk86siNzp9Br8qN1fKkZY2tBjyUtvGz9
|
||||
i+paQWowXPfFeV5WutjqRY3cn6xY4SXWNWyffr3XTYqublnWs4s+yJuHQeb3XiWX
|
||||
4o8p9csmTuC5sJgmZpkvppRgzRpHAd8VCzzC/cMEVeV2+cbFon4sHw5NJVAXbaRo
|
||||
Z/P4SoA6S2Tz0SB1FWNa1v9TEu57/f7l8XYdI6nL4y6imnJ/RZqgpG7gJUqJSwS/
|
||||
iu80JJqnZJ030hWrRZHHp2k+ZWr/kZgKGCxHbRCcQNpJCmPmSuJccVABWIkoKjgV
|
||||
R4jXDbh+saGYLn2eUUzxkZmd7xaDSNUBhP2qdtKlGFc8ESL0qZkwixLhmpgUgFsf
|
||||
7D/bGGJyVkhOji4rJDZx9I0K5s0JrDrEqO0nzYod08s7aaOcQrgMYcQA7x/Z3BlS
|
||||
uRRo6KK61dOO42SzSbFSEW5Z8IEfSoUYHoyN81kbfC+j/q1dpwg+Bhw9PTqSWfLi
|
||||
XI6H15X7H/Ig6NDK0U9v9s+gqmqG0AtQhEnCEqKNZFV1K8rnY+B+lNXMA0PIgxA0
|
||||
iQHwBBABCgAGBQJSjUjjAAoJEMQJSn+pq5SBKV4On0Gzb3r2SAx4CM9zAhGoQw81
|
||||
yM34WUHrkDESj2TrKw0sLYLMzM3wriEzFT+88buowSBT8h3ONNDijbj8NdjYQCfY
|
||||
90bqgAROZ+W9/dmV2C9dJxmv5kWJQ/5D2ksuVpu1LUyK6AWXEkV1KpIcRHCP+Kb8
|
||||
EWaMEjPPQbNJ1KrFzAFfIUeFTbBL5kMmJK5aYVUiHWnLZq0SK5OlWGqBihuRLI7O
|
||||
IoBOjlcoXvFoEgSkgUKpapE6C9VkErW60WCK91sMhaa8CY9pVDPaanMG2o73BfS3
|
||||
jGPylm4H2+8jlJ1+l5ietvoyiqOST1iIfOsbi30mxuVJ4JBvKtmapqpBwT6eNvCi
|
||||
PKsMyjB5oWI5IVbK8MDIaYQM9TL+nyMGhl19GzcUMP8tZRlCifM9b/zmMMt1sgVY
|
||||
0koF8AZfh3Ho9KLyXqNMUtXAFSQrAcTbN5SmzjlJtl+hz6uhiHH9kAeSX4MFRXX6
|
||||
JDfZxyAw72JqJkZaPEAKQCpodkNwNG9b2dedIBsTaD9IoEkryDtR17qV2ePwlCey
|
||||
muwNnGVVaJ8hLbI7ZATbIaSn7XNvMGM8hX0N/ram5nTvrR2laG1o1ss5oxtg7PfT
|
||||
rhMyCTrzTcxc8VskAgtbJjoyi4kCHAQQAQIABgUCUHsxcgAKCRBTnENrguQL99ul
|
||||
EACwc68fdeLrP5U/oRJ38+WkQxO08Doa7dspj1cy763kXGlzp27xvXlc9xWMEkLZ
|
||||
eFG6az+sI3YvW6Lq1PZYzQRLsjgMKDNDMDMB5xR3g65lRW4KKd0pCs5/CRxs2hDP
|
||||
k6ulih2E12N/yqwgXTstbEDzXmaBe/66uGdtAnK0GinXdE+5wBMu0y0CFxJygkCm
|
||||
to/hOeYufOHSLIcVVrzbsLH3meuPX9hUxjN4RIOduwWeHi0IWUCx5IyGPtMNBAfX
|
||||
+AIVYmX7rWpEH+bqlfaEZYJh5kLDebzCq8gtRteRzvRz0jCX/Zr7SZu1PskjRN+e
|
||||
MLdkNgsF9TXQy2vFFM5aRoEenHeD5gMla8pBFnbZP+AA4nPrbuKUyVcfdrLIIgQE
|
||||
KIvxjKetSj3MV1PTuH5YNBw2I3qxrgmrv+YlClmiy/VQXt3/////////////////
|
||||
////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////4kCHAQQAQIABgUCUfg8
|
||||
wQAKCRAiLOjENkQCiI1OEACItuCpRR9YS9HeORrELMBSd2IqJBeto6V0VNse//g/
|
||||
nCVKgOKJo2hpEp9BqPidjBvP20Ek/xIqHr/Pz7R6T1UVsjqtQAlLngxab81wJsRA
|
||||
QNuTpHQ0VoststglEsLtp/ziQYOvgt0yEcqKs7NmIlyA6/Uw4uzXF1D9hnfsQ1sh
|
||||
Iec3d8YpQGZf0jZFu94Hp9hpxtFkTI87yfUkqmFRRsNi9KGksl/hyN7pQMm1rmGh
|
||||
7cERHIHCiaUSu1THiAhEUc5hkMWlM2wbbFn9ZYVVGgoyDWyhDjn7qhKnERrF5dwC
|
||||
cP6mFGo9whO4U4lKUNJHA8OxtDb7mDhagY0wGVTqa+Ob2zqgqiqeLqTYdii7BnBq
|
||||
swcvkbm7BLGzpiLgyJsoxS6Rhzmb+eJiTS0Pkg22y3I/ehD2efoIO4qe/nuoBqho
|
||||
SRDkC1nl3o05NqwF+c4JB7rZo6mO6mSHut4l55avPAeurWXLdnWML9zPbdl9jJMd
|
||||
1EdVMUGfMCY5kmEkuPRw3yGYeTSM+fEB/AHj5bQZN9sjMUhatJZ3RihMoRNqJjMj
|
||||
WM0rdBHF3LGmoqq6YUPYjyfHwmNvTDpCkUM/Utz/zTmRUK6i982r3yV9vp6cdLpj
|
||||
/e8TyKMDD59EGRFpE39q73Bt7PLOY31DTrIvmXD2s4Y8KlerV9jr23yuPQht703X
|
||||
AIkBPAQTAQIAJgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheABQJUA0a8BQkUqY9H
|
||||
AAoJEO6MvJ6Ibd2Jz8cIAKfXu8kXq9b9RqMsK632pt2n1jcuxtGyOYH/fFj64ZIH
|
||||
N3GqVVQ6TnvOzmnns3iAj+nbkxPEuWLq8MfpW3Aj2aewqOLsowHSI1RwIcBhoacx
|
||||
t+GPGenmwneM9ABJTRqQ0KTLSqaS5wkUcJJ7r6SgSJ+LMQ4LKHyIOr6OIvJy+Zqy
|
||||
M4Q6X21vTSvZVeCr5rweE/l+Wc3U5ENMmtWh7RnTGk7SpjjFZP+HHhkQ8OuaZZRh
|
||||
KOGUBIBlWd05jR4nYrkoRqolRG0gxkRRFTlIhfcr0fruof/YqlC8TqADn2DLhrWr
|
||||
Y62TOOnfA0djtaNNJ2xh1mGkFaophnedlqwiYIQCDMWIRgQQEQIABgUCUwjQUQAK
|
||||
CRCEQzF7BlX3gMtqAJwMblJHTT7TRUfMFUTp8ODTbt43awCeM0s5htFIHEGcQtQM
|
||||
oLtWNrP+wAyIXgQQEQgABgUCU95n0wAKCRBOpRTltBrmqEVnAQChZNcw4xBLHvzh
|
||||
Zwwde3w4R5B04YQ5IeSw4m5aHIn0IAEAoGR4ZXhPF6tjZg+p4jpX9IF/MerMx6C3
|
||||
boAMimHZ0buIYQQwEQgACQUCU95qhgIdAAAKCRBOpRTltBrmqIaeAP92zcglLcFt
|
||||
fLl3NLu8JlNhkYWr7DNWowJWjhVcFkNkrQEApYO7wwKS1N1ZSp3YfaWdLfDjEwMd
|
||||
2nEHloRWDaSMr+mJARwEEAECAAYFAlBbsukACgkQLJrFl69P+H9BSQf/Sv1aGS7w
|
||||
JKz7/Yi54t7hVmwxQuVEpvAy6/m6e/ikLRFInWe1kNiLlOcs5sjUgqQtoAlkpvw3
|
||||
5klIwmNtR8jRVZDsvwu0E1U5XIJ0icQEsf4n0N81rYOlwrQuzDNOY0p4a7jpLFAw
|
||||
MhNwrBreF4ebz3ZF9yquxmWuCoJHE3iA+J/FaMzmGdNVxMpQXUPOjdX1hNH2e1BB
|
||||
GwbUqpSlqI8qfjEVuYjZTs0u7xaHN9e6DaqwRoI9zcv143yY1FrRJuWFBLCsdogF
|
||||
xDDUKk2VwLSFw45dmZRTABD8ew0Y7kkwHTmsEcVg8PM6XAVcVOT04+kVZQJ0so2C
|
||||
d2sL041JreDaDokBHAQQAQIABgUCUtmKKwAKCRBI64stZr6841y+B/92de8LDKj4
|
||||
UjfV05o6e0Ln6lIRgxpexbgqyQ7A/odZ9K8B/N9cNNaFZJR4tAAt+E8Xahcyd3qn
|
||||
0rspvI7cdwl4pslO+DIsdoejuL8g7SBDWCjE9sQLEDLxG2hqUkCrc5mh6MeAXcrK
|
||||
12LKCq1uMPQzc2P5Prz2C4j0XITBzSGxukxtoC/vj93+h/gGcQUzQIq3L4QE1q8X
|
||||
F6bqTFpt6i+tJULSZdrFNkcg3zx0BkLAceGCd+BDv++M4BRpWuzkXH/tFpXq/reh
|
||||
uh3ZSstkvpqZot+q34GMCgGUvsM/U18akYJFYpog25rdYTLTs3eYSqR1ef6BQ4lh
|
||||
GWDx4ev41YIriQEcBBABAgAGBQJTBnZtAAoJENgv4DzFW8/jPXAH/RObXOYzaU0R
|
||||
8ludCEhJcWlx3IibYRCQZUcQUUTdiPHEiEVq2vPruujvL9KmK2c5lvK3TGuPm804
|
||||
F9MpCBWA6GSM8txmIndPIUuAKoZP/dErMo+A699BbBesTGY0v1pF6eyKPA5cgh6c
|
||||
OaUXHCCOl5LPiWN664Euwk+IUM8bi3Qx78PopW+E0EJehd3PLkC5XyBIIe6YI9ov
|
||||
Xe8K0B0DMMWDydgdafTjGCB/nSO/C1qpa7tVwvGLFdh9qhKndb1kbFYBHv957ZhX
|
||||
QoLFo9D1IAPEzXEr3q9FsNgaVvJNlJj73pjesO6DNfBEXHHr6IbGl/IrmH+Wgo7Z
|
||||
m4RIYW8DfTiJARwEEQECAAYFAlO+oyIACgkQj6lgRkXLfvdS1wgArBNLxdl9uDp1
|
||||
4N7kpYYWDGi0FMgNhyQCLzm6wFZVhZ9L1bwhel8j199rzpTOL96ijAZf4V/ProUj
|
||||
vs/LJ0Gm0eqLLYqRoloBkSlpmywf+T3wADjT5iT7AdgAjOEdqI34mrjDXE9/kbM5
|
||||
K9a8J2WWLtl4P4SaTqiWmQBJBbNBlaL5uIutqX9e2cm+/jufcfpIvAFi/ALCu0AB
|
||||
C2XnfAKpezotzyyk2TxmpVwemJeBscJgbF+mN4JssQQq/WcgGiQHtIxtZeKjpSVC
|
||||
+T99v4/oPscOyPt57cP5/QHgv3N87ikzCHwtfOpWXWJmHza9qImDPzxlk3XeMZyb
|
||||
fve4tO6bSYkBXAQQAQIABgUCU3uwcQAKCRCKcvkT9Qxk2uuTCf4xTAn7tQPaq5wu
|
||||
6MIjizqrUuYnh/1B4bFW85HUrJ45BxqLZ3a1mk5Kl2hiV6bLoCXH+sOrCrDmdsYB
|
||||
uheth9lzDTcTljTEZR9v5vYyjDlxkuRvCiZ2/KLmjX9m5sg6NUPOgeQxc3R0JQ6D
|
||||
+IgevkgTrgN1F+eEHjS+rh4nsJzuRUiUvZnOIH1Vc92IejeOWafg7rAY/AvCYWJL
|
||||
20YbJ2cxDXa7wGc9SBn8h+7Nvp0+Q4Q95BdW2ux2aRfmBEG2JuC4KPYswZJI9MWK
|
||||
lzeQEW6aegXpynTtVieG8Ixa+IViqqREk2iaXtfoxVuvilBUcu5w9gNCJF+fHHZj
|
||||
Uor5qHvZz91/6T0NBlCqZrcjwlONsReSh1Stez8SLEZk1NyYmG56nvCaYSb1FvOv
|
||||
+nCBjz5JaoyERfgv4LnI+A1hbXqn3YkBnAQQAQIABgUCU3+zcQAKCRBPo46CLk+k
|
||||
j1MWC/44XL3oiuhfZ/lv+VGFXxLRI7bkN3rZrn1Ed+6MONU5qz9pT9aF4C5H/IgA
|
||||
mIHWxDaA30zSXAEAGXY3ztXYOcm4/pnox/Wr6sXG83rG5M/L4fqD0PMv7mCbVt6b
|
||||
sINX5FTrCVUYU7ErsdpCgMRyJ8gKRh/tGsOtbyMZ/3q9E+hyq/cGu8DjhfEjtQZD
|
||||
hP1Gpq4cyZrTRevl+Q2+5juA4bCyUl00DQLHdCuEEjryq4XWl0Q2CENDhkVV+Wkv
|
||||
fuIOIVgW11j7+MmMXLzMMyk4MZtzgedJW8aU2/q0mPn313357E9DwMZj9XvB3JCx
|
||||
4dRjBR67zwYySVvnK8KMWVNPWcleVrY+oj1l9psq+d4pkjtAa/cd1mBfh7h6uKzk
|
||||
ekj/zWuJV0+HEbKRmmBpc8SWc4QRNUrCBk7vVfGsBLCmiCK9Rij1zgrwihrw/T77
|
||||
BcvOcxhZNd3Y9Vs9vavExF0/5IqclwcuJqQO5fRKmMCFi1rwT5ZcWANmJXdaN8H/
|
||||
7D1WNXuJAZwEEAEKAAYFAlN4AagACgkQRCkHtYjfxFfaSQwAjmRJHNBnTYQ2Sluy
|
||||
9KzmgtiVlxl6Maxr2zBQvXv4/mH2Sl2BeFWaM8kiyQzl6XZV5/q8TCkmskW0N8YO
|
||||
l+l6AhFGuh4PS8UWe050fcxJCB6Z6XUFdvVQ1F1dI3bNcmm5libcMSNFNS7pQF1q
|
||||
az4fmVniwPx1ezBdAvd4n4l4dipg2bW93iPMiy1JDRc1Um6U/ouW2KnD7l5/PkQK
|
||||
WLzSx96xvfimDD6DXbW+/7nFhle7foTLSlFOcyeuXCOQCa04XQOJGKZtiVp1Ax3M
|
||||
v8t1A0t2EzYlTTKZCCCCa9EDReI1m7EJZ7+SJueaW6u6/TuM887l4FFuM+6Bow0I
|
||||
EC8FJyPdZg/BqnZ3tK4xSm3tF6oxc8IkaQJip9R76hPSWRfzc7ooTbxQrzYVzTZa
|
||||
/pb6RfL5bTi3Q9D1xCRjPtkZIceMWfPtnymlTIDwdefzTT0wxj1vTSluqMih0LOD
|
||||
RDrmysDSx9MBfH+zhigweooCCj0wLmOkmT0PjgJvL9TBG5HViQGcBBABCgAGBQJT
|
||||
eNsQAAoJEPLvL0cGnouP5ewL+wVOickmGd+Dout44YAmPXSzdP1KervaRAWIQLFd
|
||||
a7XFb2krwGwIpkw7hR9qhAG/CWbF/WRQqWB9M2qQEaHP7LXjPuCQVf9w5UJXzKUB
|
||||
ft//PRF6IzBOm8g+yHY1MJo3x3PDd2Bym2hnr4iV4teVnoHiutAcKPndpu6idaTk
|
||||
hguNuKOc1hXqILi3x9WRVi1d2UL8MakyamVz2k2sRktKQEZ4goEYq+8kFeT/T0DH
|
||||
/bB5N3PEKwpK/v03T4fD8ihMFYwblN7Y+Rx0mrYthCIQYpfAVA6eXjyABv4kRj/l
|
||||
1G1ir8ar1PnrHiNp2Hv1aipDvfDZnNpicwySOrdyQgpjGao75Ipw1RNcCuS9DWUU
|
||||
POYYQQfknCeUMgtQDqoJBYiE3wp24QZw3PsszyMk86bQWqGuhdrmA97zwX9f1me2
|
||||
BdhwyLPkBJVt/6t2Tp+vx00VmhbQKLbpPIACzqAGw8RtUx1G5bmSjRgAuo6xWOC2
|
||||
u9Ncxt33u/zQ7UvC/wQ2FwHHD4kBnAQQAQoABgUCU4DA6QAKCRAq0+1D59sVj5pD
|
||||
DAC+MneOmun1zAq7WSSZmf+AI3BzYGoYN67lJ8QXTcgDgbqXAtGQvp71G2It9ugd
|
||||
PEeyQ4T3DxNIYA2uC344hdsVCAnQHO6NMvR5A1qBUldxp1w7GfgV39p1ANzxDNwG
|
||||
jwwfUQfqk9VEOp4+puut4o2fhyMmkC9RaGzWV5taPyWL1N9+JqfNfsjWFC5qeS9J
|
||||
OLTvhmk2lLVKnw7uKluiQVzr7yj/gqcsyA2sPfs938cIr96CveTdd3d1IWcRErB7
|
||||
2e3zb0PKKvrtXjfAMoZG0vrsA4So0D2Z3Y710bGgLQ1WYDlRw7YM7/XKN2WWIBWx
|
||||
LNfEjVIuVnpHLCTNdmntLp5oaBsC9TrDwUMDZ5DEro1XHijX3h7x5Ni+XU89ZodS
|
||||
eQy9uvLwkgjiZIxD4DfCXQNc7I2a7h+M3rvu3LeBIQe3v/KNMDpgL20AyLxUs7/e
|
||||
qe0zWm3F4sfYu7ywA/mkH1Az3xTWj/I76WlmKPSeJpNEi/fol0PCsTJ3vWdpu1Hk
|
||||
t4KJAfAEEAECAAYFAlKfzT0ACgkQ/bW4wGfyU4fk7A6fayMhAuOjAsP5s7GebYVz
|
||||
RI8Aj5Qmp4w7DyJRYpwTzyIVPXzLTpOmpQRp4sChlIA9YM/Ho8jhacvpBKDPuJr3
|
||||
p2DhVTUVL+BRRWoTFJyrlbC20ftr3nCOMEW4yHA2u8bKvHwPIUzasqqPtybJ2wdj
|
||||
Xx7V5W6TpwWnpJFHl6TyqFEsb0b/Ne61Tx7mB8m/0UUjKyu43O0k5p49dFA7FUUl
|
||||
maZmjGrfdxSN3HbwRXbaOmWYn4q7TRL56BmLWZklxwXCY1nwEXdkC/R0U0s6NNU4
|
||||
o07hahbc202SzLX9PaHCEAREVlTz2nVdIXcPUdo3hOIJhE/2mbfKTqB8WRgE5jfX
|
||||
zdogJBhP7D4pV2DyvE+SKvIXQ1Xp/2SN9hLWwBg+pQwjMpiFX+HVRw+6p7QorR/k
|
||||
2kryhtc7aUnMtkTuCq1tzzwbdGD7e8O6QPhuhId06GbqKLplqYPap2sVAONE6NHL
|
||||
zmWaY0nFdzXiICXSk0oTUS9NwmAn0WdCeC1pJi6T5iyopxDNMyIFFTBTDFjxWbeM
|
||||
o6HRKsbjnhEEayV4bwJ8IaPjhvEUTpDgyV28kCSRgJ8zvNLDD+nms6k39K7c0xji
|
||||
BgIek47zMp6bgTPAn0Q23hwCMf+FiQHwBBABAgAGBQJS0swMAAoJEKQiudjlJ9vb
|
||||
tnQOn04QseTRPp6toW3qTzPs2vFToGrZWuhRDFxEUEuR1GGM3UFWvk/a7UnaHsaX
|
||||
LqZqqKIdqWlCb1EwddFJKiZU+Fq/sRm86VAeK6OQkNwMtbIugW2WC9MPre8D9gVu
|
||||
dx5ZjYBNjqCnX+yn+33M7/LAa6Tr7GVUqV3aM0ltCmQHABRp1acQWkWLG3IQiA5T
|
||||
y64hXrCPr/dXLCyFsbUyXccvgTiqlKo5OCh6xC8vLI2OUjckvwoH5yWM3EnEE4Tm
|
||||
ypGAHk+EP2aVkNflYWMvcRbBAeLVKk8+a6+JyJJnLRKHDTKN6++kyceeTN4fb1Bv
|
||||
2AN+S+WZLkeTatibeq+78jn3ES2Yl9Jdik7KF7cSx9+Y7EcSoua1DXZzHVO4rPSB
|
||||
cWeH4yb+3ET6xUeyK4+iZqd/067qTxED6ZDf7vXk/8+GiobRC7ob4Y0IigH7bWWf
|
||||
xiv6DBuwpcRipVAhMReoOR42UIfL1IWOk9d/lcmHjmTiYvG6XRMcDAu3VHjUKE/j
|
||||
b/6vcq5hZ9dcBSzPQJ/mR9AtiqnA3Y6RfK1UrbpQ3rJUu4UF61NTi4la0kFAETcf
|
||||
JS2rTRgBJ+tbL0hPPVC/81ZzjF2mgnvz0CfVxXpQ7un2iLnRKKd7q4kCHAQQAQIA
|
||||
BgUCUwoVXQAKCRAO2qlF6KT/l55/EACE1KOCpGqaHINcLq4KWI3rRss/aSOj8LVd
|
||||
u0PcVloy1kZ2YZbB4UqNSYbzWPUASCm9kEFPlhqAUbVjyMZtALW4ZhgZSrHEUTGH
|
||||
ygdFNqRROhxg4e7Vj80sz1hym96KG8gdm5oLQTbFhgcYHKEBEgtfLmZ2Cdn35Oje
|
||||
QYVOyZzeTw+k3ihaJHp4K/gVZMcAdLFT+WWoXO5VzZ4+5g03rYbNGcsQ086IPQJy
|
||||
JipSUe0Lv7oYYc9pmJ6G0vbYM78qkbYm5sXe0S8JRjsH+v41AN8JmILzdQde63gd
|
||||
RsMpSvXkSHptTjxtLdlFf4uopPQRTK8K7qHkw3dTzpwO/kgy1wtrVGxsASuDxCmw
|
||||
/yDHuN3SkMqWgGF0IFqsJdy397fXggH1tF/z0VHXEsQPFlqWOqRak+hINRonEp8G
|
||||
q4b0lnLPSNxTaO36AXLt0uvsDuoyuv4szjsps57sxqbrUJ1QmblSC9xRfkAveaaK
|
||||
U1I50wURejtadqOTnxDgCdn++nN2v7WbjweWdFn4r7kF8ww7BAuzu0kZGDLwiPFb
|
||||
Px+n4o7DpymLUrx0W5udkdMxVhzxQit+v7RWqFFa3DzWxshWE9pJS9e+xvnupibm
|
||||
8/J8zzC5Vsz+brVGGPIDOFCGhq/5j7nSpk9oxaf9uaBSqcWoga0TrF1b/fjUNNUG
|
||||
LcU+QbnsqIkCHAQQAQIABgUCU4BKagAKCRDxLZhXQ+4mIKfwD/4zG06+G+lasq22
|
||||
qv0gQHzdkqXJqjlpkJ+bYgUbxvxYFevL+eXboCjImgdTqcN8xoBd5fMc3YxXbjBR
|
||||
9YmQYL+5GqKILme7bVfOIOsRlRP/V4zroIV+CnISEa6UvEKm2u0q+Or2KzZhoT+m
|
||||
DIfQpjhucnYNB+jMF5ogvaLCmPxu9Tsj/PytO84hPoiJkvqDrAq558JMQRAy5MKN
|
||||
3p4GyTKAjSyvqqUrmrcMnbSOhsuy2mTiAYxLn/CN5g+MJClNUhOn+sPN6RDMw6us
|
||||
QtmOoSws9ZKKGpiQNPFidNbtZ6SK43vO98mOkMNFnxOSbKdFkeIHYW0nC+EuJtkP
|
||||
WS1v9o1hW8M+rTRwH6N//51mZ9iCOhgyX4H1+3VPVuqYnfqedmwALoIYeoQ42x/3
|
||||
lRfQWlqJpiFbY4xwJKR1ifFerziqaIxvpcq684t2Hk8OOLNeAbH8Ucf/E3EiszPt
|
||||
Y1zaXk9u6SB6IY75UVXSba8OTGFDqkxqVbR+hoaCUputrDNfegmwe0ZKRB9E6Izn
|
||||
p80IbFfnvluBVa29kBEEKlgd05Jhi6YkbffBT5bWTu3xyZjEmqnvljsU8a3Ij9Ba
|
||||
MmScWEDPjbo0FE5TMZgHUsOQBwMIVSB5ra3kxGSh6ZcffOIUmYois3bE1K+/wHJ3
|
||||
Q3HWPSjdv6d2X9dcupz5WLL+E6A104kCHAQQAQIABgUCU8FM+QAKCRB4VAVOzv4Z
|
||||
5HH3D/9/lb9giwpUQn6YD0y46Bt9T+NuUcUy5sdB4B/lC2kCPA9WJq8eo/lFFuZp
|
||||
BTbcdR5BfHm3sx/sIuD60TieVDXSdKVuHIDGQh5T1NrodXf0xykJ1TmgZarAyMjg
|
||||
GXptbFLSX5GLDmU51G28kuAkmJH/R03z30N01nj0tIBIY9s1eK+ADzDyq3wH3O+t
|
||||
Qlrt9yGNEqmC8A1j0Hs3edKRiQyWJwViYsQK4CUCuzwpA+oUbJZ1z1v1Y/FagabY
|
||||
jTucmRgCp/FD1IOS3jHl01NtUIfSkG0BwBjlsW6VBVZ6J96VT5rOyW6wQOSOFPUN
|
||||
3pgaIhYFfgES0BXAXoUwQzgdzRzftZymgNGRu0Ox5KUx9aKYaWwvauuzb0Lw4IoZ
|
||||
TFx8GURfhMCgWn6NSLIF8MfJP9CbvujfovD5W5wffMk6cYKNq54/vVeR5H6hhld9
|
||||
7PQIqPefZjTOoDq08FWby/w838sjl73VJfZyFjOrLms8TusFkSLY/b1Kg4Kv28ie
|
||||
l+Ufa18goqCocHus7VNvN4YKTQGOypL0w8j6SvlvK7trH2NCBDVLU+sN6RxlVZKK
|
||||
hqMeXZHDvX7/jpNHhjiyZ6XqxXLxnXeFf5hiyh/k0irJ93yT7PvTB/FzCnKejQ25
|
||||
It2n3+bzw349vp4cC1xulk/ZfSD5gMXmsOUMZpDQ1r/9s1OERYkCHAQQAQIABgUC
|
||||
U+qnNwAKCRA6L9iUeafEwX2RD/0YMOSJsHIrPoiFVSFu69w8lvgPfvSQCPJrkoVP
|
||||
mdc5YiJiMGp8DVp+UW3JmOLKIUPUg5p2/C+8DLgjWLV0f53srOCdqp9qXBx/0yKO
|
||||
tvRNGlTEYywVPA6JOeNzjcdgUgBrkT8lw3Ij85+eJDVV6QFuTSPmeUp4hEESeNKP
|
||||
WKT0B3Ixl5zbVHO6Qfa9NibCKpOll9YkswJdynteFMkpVm+Lq5mpr6Jpbn1WDrRn
|
||||
cXp4jdZYG6yWPwQm9m/2Ua9ILqb9xBBKf7lNkywVbku8hmzZX/vYGZPGVZddex1Q
|
||||
Cwp6UNdUMaHUGhh/B7kf0BHseGPNNg8sxLE9RZ85vHmXKmQfUDvKY3Kzk1N8gogf
|
||||
+78KXh8pi5KIKzIq0GsUCujlJxIWDTro/Q3re3CT8M3op3qx2gjZbpsSmweoJtMN
|
||||
UfLY6hx5M3I6faxKB9VA3/dboBwsXr4UddQs+GUsBW5MevrFK9R4CuHwpLSpZBXD
|
||||
/GnQ0p3M/Ddm7Wy5lmHwUimStc+hkrSKrsEy8ixa5sV0hq7Ii2hE1xdEtFSOCLgo
|
||||
IIIAzp+N6MaqCEkmjCUz6//74Wy9/O8MF2ytu9cAu1lQEJrJa2YSJk8y28Y07y9i
|
||||
9fzQkkQSVympUVfRws2YBmqvuyxcM9D0HnIkivoo6ka5kCiMsYQ1Y3F5uDlOi6yB
|
||||
c6AM54kCHAQQAQIABgUCVDngmAAKCRDRWYmf09n4stAHD/4u6iAABcOsKmIKIw7K
|
||||
gO/2InxofURr68ZguHVna4C8Vu3aK1IdLsPyS59CUa8yqEuhBd4R6z0GrJgj8s/X
|
||||
JGXkWYyIUeZimLaq1rBd76Wi9lQC17G+eCqgEfJeP4k9PNyU5tZrxGzCeCRVRjax
|
||||
jVSFmHQ4H0Disw+pWbcEWUxI2ObvrCR0uFUb4wI7vNr5ZhMfIZq3A1dn/vUreNKU
|
||||
4TUfaNUXJ2uetjRZXbHHC+3xS/bjO5JhTBoneScGkVOG/4l4kmemHLTUMn4rZDlq
|
||||
BxtGil7yTN/VrCbpRygnpEouM+JzXeYWYDERRti+H84HJusDRIdPNcobFTeMR8VE
|
||||
U9Q6zIN17Xd2Y+MAS+VxR8kpbnUQnfz2D0ab33AsHiSfzk66HqX69wxsP0KNlZ2S
|
||||
nvh7vuCqWZweTa2CM+ZjHMrCTAwl4gPWHcEZRexLD/5mvBXWKccq0etfhkWPgDVD
|
||||
9SjKmrrSY/alux7SG6mmVBQLoZg+rnrXAq2lg+xBe5nmhSbqM3pzvXwcwYHKSYiV
|
||||
iozRJScaWj14ljwvnUFbytI6ctdlNVDad/DwbNfDPcNnjrAu9LVYZKOd6wq9XJS+
|
||||
U3W9d94zVqPo8lpinGBSgEc4hkN0NxkxPMnEcHm2XkoCB2C85vcxxmUHPXK6QtDH
|
||||
6GtPb3GwTcreTUU+rP5zhOLY04kCHAQQAQIABgUCVDokfgAKCRCaNKuaK7KJD/W2
|
||||
EACwKaI2AMnJ5SBBfBlZ7dH280mC8BgcVrjDJs3Yh9xx708bFAUNir3AUa70gtQv
|
||||
IDoaWHaLiPkUlz12+qZAR1iTxZhmj6dESqoCzA4vsCu82YjxEjCvL2mCUvUZi0ti
|
||||
syTJ99EGENFWX6yYsPiuXo0oHaBc96TqXCQjZQZjYzKHAOjPrujtTw2/zjqkj0ak
|
||||
pc2c7tUuR8g2jit9l12Y9tBu5bcJ+Wm7XZPSjvClkdm92U+hjM8cdy/N5QS+oXIO
|
||||
2uja2ECrF3VD/xxL7eqZ1QQSk5Oi840TQD6e/WtsOJrk9KzAHx3Rs0YXu+/NvCk5
|
||||
U5ZUFxQRCh+ptt3WkABxMNcnQf/R/qxvktLpT9VdiIM2vWoAfVwEiIESi48JA3TM
|
||||
znoX9KCrdFOj+pKcrUtzNNubfclQNqlLhugOQ1sMH7ka2PncVHWxeWaEGBCblwy5
|
||||
O71bodoICXJ3xmd3yB47QsL3ZTEUMw19mnac6Dcu7sWR89EAW2kjnhYRrNsRNf5S
|
||||
36UWlsPiEl3ae2/R4wenSOm0n2FD/eNDIu9neth1B8G1jZGlnuGn3ggFm07h1gnu
|
||||
I5z70wRdLeplOJPpcFqNLmGIyTNluFdDhkn5SHQfLIDsYJhc5Qe9kyMMFQXi6wlB
|
||||
L8ph6m01HnWOI1Elqy9ebHw48QIRicWYh3uMnasc+qdvY4kCHAQQAQIABgUCVGcY
|
||||
SQAKCRDNl0yaOU1jPyAnEACOeeeZEC4ODefn5qtazegMI6yOJVtdyI19x+OtjzL1
|
||||
Vgh4CVfOqPuf2m++O3MwNMW7M1vL6/ytImsgOoX8EVbbhF30JdFIf02o+Pn4SPHH
|
||||
1tvuRF+PpaRqznJVQrBx1X1Wf5PCy+5m426CYRvcY0hX+iQbaq/vwBbBCAPjGBhQ
|
||||
Woi4C+vI9wibgz745MKQvzn6L+RUXTxDlkPaHQtM9srw4wKsTpJg442dOBSeTwZz
|
||||
W6OuwDlJNubIah7gc1R/eDAD+x64O1GhXkUIjIDRJX/KrE87pMswhT8SeMshaW+e
|
||||
nQ4pfMMbLxnCZThH0/LAIt2E9idkKE+ygHBEvmmID9UNlI94L9DJGizXA7T7EBpL
|
||||
G8V6Iqav1soI9lMDkIfWVbcnI7r9A9i8nzzFUz1Ruug2FKWr3q3eUAdp09i2S8V4
|
||||
Th9LSKphVGqCBa76y59uQNGeUBcvx2z31gMOzyb4I5egKMU95yr6M7dLVHWdg3xN
|
||||
4eM4wVw4r92NNeBZoYKsBDoJwp/PUkf+0hzCbDCqfKMp9Zn87J3LPoKnKTob49l3
|
||||
zxKZzmwy6oPfCenshRg34RL9WzRDgeCHBRfGK1DRLuv60vpe6zR+75cO4VVhNA9R
|
||||
J6WfCmJPKj5TrhwxyzIHphAlG0ezoLetx946hXwwIZSgVGN4RuUu5aVoi83EHHGG
|
||||
XIkCHAQQAQIABgUCVGvw2wAKCRBcs2HlUvsNENekD/9dCHXxPGrqyyH1TFEcc69A
|
||||
lhwcLgBlepgigK4mEWhBIzFhU4WLEbQkvwhrXXPQcV1ORsLhxXBxbgQj+NuSPZMZ
|
||||
sYf0XPsPAP2WQFVOQOIGkgdIZDOaGXQdkMGJl6xAhEnbdIh8XM/f88gdUeKtiq8s
|
||||
225CTSSc2zqxqcRur4eg5OAfaxSXkWHO5VXx906ojhwpY5RwXRMPYkAxpeiBbkMV
|
||||
KjiTSSu+afuP39HiuuFtY1yNmxnpEwEN1dZgPcb+j/kkfjYz4OFkJcerE8pLGsW5
|
||||
MznHIcsfM85tQzR/cJuDbKGSjBOJ9LAiAewnWO6AUhcSX8kadSUj61MHTSF/JErH
|
||||
YCaTOzkYGZKI31lgqrerp5YEbGZrqxWIoM2RVgQBkXhyHhyeeHlC6YNkDyp8MFrs
|
||||
GB3RD6f227mi3D0HJJTzhp50MpwaVL7t1Hfxa+/uEzB5jiP3uRFFMim5itKSSz9+
|
||||
i6d4tvGx1EwCxdpqw5cd/qDEYxeYkskguNSopAUgYqUcdFjt4xc3UujS1XYzZcRv
|
||||
ZaYhwpHO2x8/XnTL7gJ2oSvxG0uoRVBJFkDibSSnPAfIVyZgoTNmMbRA1b9Bp4AA
|
||||
MHB2QL6YRXrsvb2H9kuSGyKijDayoKuFUPo06hx1yOey5BhwwmNooAx0BqP0rWyO
|
||||
LPDsOW7UDHVz5wDuK2etRYkCHAQQAQIABgUCVHHpRgAKCRCY3btOIsosg6hdEACV
|
||||
HVLUlMx1d1aN+qW2pk5wrcjqhKdl+S+cAo4flAMPShnmbuyYos+7nkKsSkLc9Joi
|
||||
529otzXivRFnaGiqzNfjyMpux+NAE2rq5Xig/bKuPW/Ofbc+Ysugy4dWD3nnrkFf
|
||||
zW4ycodOkszZDI5Hukt+AnKQ2tTqHM1bCNUbn1lTLqtQvePj2Q9MgglS4zFA+d3N
|
||||
AJXYLBV3XdqBFPyT3ez/cAmEilf/vRfsEWu/1O+x0SjR3dhQrIZidZm4ZNwRR0wC
|
||||
xZ/ZXdf5qrY1EwK7deMMbORsbD5K9WLFkNQPLlVLZ1t67J9FJz/WxXAH59/3d/Nh
|
||||
4bslvhzleIOSYSlZRv4QW73S/h1de2PmJLBnkFtbCiKpo76+wKxYQiFGKOPnpsgx
|
||||
I0Jk37r/EUTtbuMkdIgGapZJPP/M+d3sBNxxH3qcMbqOnpf7rkbl30Dpln3TRDY3
|
||||
fcZ+YMyA28KsL7WRMYzdj6JW4mkiz/96SPKa7azmLlvjJOaOornHHms8HT8nrzoa
|
||||
DLluYGRX3yqPcOk1OUkRnGCIa0yWPAu4dmLprJoq/116S2mnXAadkeLgxKB2+nhp
|
||||
V6r0mDBA/5rtX8NlTriqLHXQqX/yZMFx8MAd/c/nV6Nx2EqH8nnNZm95HALDlG05
|
||||
AIfOiFjdcpqnDU8srSvABMDix0NX+KNJpe5/V839R4kCHAQQAQIABgUCVLETyQAK
|
||||
CRAXv5SMBHYTfWeTD/9zqPDnO+u5URYtTo+RVaB70cX2b196Cqxt46YT5QgYCliv
|
||||
MUe3OWBAjSMJ5UPLgqlIMaRX/P4j1d8VbjtRxcA52n6JE6sjbSs9l4KZsn7Xlf9N
|
||||
nt9obAzRn2gwpKm1AtoZLg31lmWv4NLVn0gq8mWiOjpKA/FB0omHg8Fcy0F4BrEd
|
||||
PIhT/cYh8kBzbQqctBx3jrra44lomwA8BDGep/f9Q0qk0JMZ8QcCB6RqitTNOkEE
|
||||
+rctgW5teoK7tDerpTK9w3Odej3Ts0M6qNE+3Ngc0uMDsnWBO1BhHkc7swO0Oe3V
|
||||
Svj9Ay7aoYm5SbssQYDC8SiAoBHkeknI1kKR1tfWwsH5mxyKh3njQmQoqxdeyhLT
|
||||
6hQr+ZObs7Kj70b/clcI6NfyxfpNYhYEXs+NQLxpTQfla884kStGL3X0ucLUNSP2
|
||||
vZtoPqMlj4+nN6eewq6sWkohmvhThzsVMfq0JNgHQfJeMbRtxzbosIxMu+QmyrB8
|
||||
CAUXf/ZEaxnIpW9ev6LFP3P46+EKKSlPRyoW9AyHJaWPAf2THWFd7hvqMtGi5ZXd
|
||||
dBieEh1tMdbgf62VOc4q3k7nTm/tdqjHxegMlVf7bAKuKRCxFRQ+CDVsYIeBLsww
|
||||
JCQr7eq+135qI10xUd77/XxwnPLwFcEXW8StTSp+AZjFFZUsGcC8sta6Hi73gokC
|
||||
HAQQAQoABgUCU4BMBAAKCRAWINxaxqB9nLq0EACigGQ1GzxUgMkTBZa90xQGI8z4
|
||||
B8+PrXUoMBRml4x29W9GfTCSgZKo6IkzqOsrEzsxXjlbqpebRb+ZVEdaHByR7SF+
|
||||
5AEby65WgDAFT7Bvn/Rbe4zYNgdBN7qJGR1Dgl3b1/DuSjTBY4k/Gq2G4sNYboAC
|
||||
a0NSjCiL9xLE+WX+gJ8FyFDfHiOIVI2ayapsdY44Si2pt0i7hfGDKQCABcBW/zrr
|
||||
UKEVFOwkM1W+v9QeRQiGHUlhB7+bU+nYLhclAtqY8SH+zsc+Kp7T5OBwyba+LDgY
|
||||
+OnDVLFu1669t8Kb2mwkFmHBkHOICtdmwfbspXiKOdlKA6o6i3XW4Qw79uhrsiVb
|
||||
tZpSUeqFuhGLUS2S2/HKfvafvb1rS26eAHsl9zRrWOYsmZBmQo+2pLNQ78aTXXHV
|
||||
Nrt0KeCAWcp3lb1WGo/lDMv461V+rimLylBFusR7EeoPQyBlBSvHXsWHZER0Odrn
|
||||
k+1vXAOlfI8zBPAhPGArUyccPyEDNZh23B5K8dYjV899zn9qgaLqjH9rw18gL3f4
|
||||
pc3GvncDsqEhrptrZ6Q9jJwkTq36OHgngDm+G2eOoRGss6+kTbZrVIJ605ldIiMQ
|
||||
5MUsxl341lrddR9lvR+W4GjxvHRKinMRS2DzpwiyX75mJ+IYcu9jCqnSP+Pw5Rx2
|
||||
td4Abi/tnJtaUy4JbYkCHAQQAQoABgUCU4C3tAAKCRC3YYg7RCi9wBE6D/95FduH
|
||||
ScmAnKs1oNjVix1AApHlwhji5ikqFVVd6Bc7tTp2fSknYacFNDPm9ffRgFDOEOKO
|
||||
nCHk56i3f6ZX1nTQ5hLasPE+4chiVgB5H9J+HNZzYBN0BVuK4vMz3lj2id/pw5rO
|
||||
xqSG2HC4yyzQs0gHLaOvKb2iq5+hEOVDrm/e4OdNFY1pXEu6n11pYDHCry1S1DRw
|
||||
YFUsU8oIUA5EMIUZdSGQfi0jNadah4FmGXXjLuw18ytpuYbbHB43L/gZVcUVwjxX
|
||||
+s4e2SCp2maFiolgI6ds18vVZ7WCew6WzpmLpB+z1srPW9umoDFGvoh8pQT5coow
|
||||
tnbxBLufpsqjwWZOtE/jp7a06eDz16V+dE4MpW2mzNIJcaByQbz7YMjluUDOFDHZ
|
||||
9VgF96IVvvcueVsjFlj11p60JfbGe/UMii5qDyRPLu6XDlwaQSeTby4IUf8EW9OA
|
||||
z14y60b6hOVfpj6SGBRaxlw/cF4Y1rIDCFQuqMRh+eSyEtmYC7aNTCex3zBD1hus
|
||||
5MfzSBrLNV8W3e2TjL1BYnmNpe81llQ7NWgAN8nXOv7QNnpI720VozpCGwNnLZnR
|
||||
qqOgn6oqmbA4aKg7PsWOrSdCJDnpOU0QDBmzdxqTvdp9yDuQS6WfJs6IuPbqAzYl
|
||||
ZWZQy1YlnJRE7Zq/Qn72r2F7ouArT2yLIpOLrYkCHAQQAQoABgUCU4EgMAAKCRBd
|
||||
cNLrytLJ5rlHD/9ZFsn6AKiLdQxWPjnfry+R2NSDChutrfXN0033+5XvkLThu377
|
||||
tCBxWR6bIomLpjr4UgwQaNAX8t2gxxdd7pfoXE3w87hnb2wzaJmvhjunHFtGaxYw
|
||||
93kla1JzvZ09drE6q1pefvxssHLh/IbXwOqS+tBoJLcpqXDG4v5b07RTVtQ3H6ON
|
||||
t3/W2HRJDe9fj9VH0+feG2xlEHJSLoHgix7BivxiDfbQKATqWum/fFNvHB8bOnqF
|
||||
mk0btX4QFvTAj+Cbo+3eDr3zwO6PVyEa0M3ChYnKZkYtFUXu8weG7WyDInvI4TK1
|
||||
JtZns9dz0lQfCwd/24r8bQ4KmAcvgfVdnTUI2BO/mk9IPCVZqF6/Fncnz9fMA/aK
|
||||
1lKMKQQY+EfkYKUf0vEHzJWSTHxyJAIUgGCMGgHCeHW6GjRgBLSrEu2nh/+i1FBi
|
||||
FLlHkyZhUp9KO79IQ9D1Bnemd9/7+SUeH+XrGmcGXd/Eko4+5Tm7c3YJEC+bAne9
|
||||
4Ey4WZuddQ4zWrJ5SaUTqingfS0AlDjeOt71+kFi1x7Q+9oGhuBvEkSFFB47e73f
|
||||
VKFVMKqFdjeWUYaA5oPi2lKZx89c3W5lWaxOlhwQ6sQdSdwtPR0G1fNthCQFvwkQ
|
||||
6zfWNH4bnFjd0xUOmRvlF1ElnCYO88clGAdLTjDVCzbwvl65ImoZrcbKVokCHAQQ
|
||||
AQoABgUCU6SwOQAKCRB0N3+fakeRn9dvD/4gU5OqbyWqnvte81d89Rt1lclLUDnT
|
||||
JFabQbjLRyMpGWbgVEJk2F6bUB/rfuHWuqTBa8XLKruyWQL3pZM+PctMyrdHGKSo
|
||||
rxv/O6ggBEf8dxNuBaDJFVpa7DeSd0k099El0mci/pVRClOc7qoLStsI7LZ9sU+E
|
||||
7oUDmdwg0lsY1gY26nJeDyTp4c0pUSS9vJKtGcfErpMVcE6SWqyCki9nT4r8u02a
|
||||
5UnTzu7HSCp2jjx53pFWhd+f0n0wpv7H52mXAqG0GXeLCYo/sYHPxwySXH7EToUV
|
||||
CvQb5Qc3CQwqIZc9xZzsil42n5pO51X3MKkTJYV9q7DtGm0ECscZ1c6FBkgX+kqU
|
||||
at6tYNkkcuwAXtmJ5wpfnKvWnMJh+0tLcxhjS8HYxAB1AP9R3VavfOJKsdnlIkgi
|
||||
db8SLh0d+nDUGHcqZZ8a9Pm5/WG/8IIRehPvs/MZF+lsSk/6Flfxk6i/o3B9nnzj
|
||||
pLEfDH45k5J3EbBEm4tV/8QLehZ/Yb/qiVGrOzEpCgpIjoQM+2UcWTLtkHfVYf/4
|
||||
uoT+6rPGjDaPv6V5WMCWCWBrj6NKFPzYVpu8kzPjA63QWQ2dBLz/rddUf8jKx5Pm
|
||||
hV6hKk2gflMPy2mhzFr/mAwfJn0VBNI2xYqblqEreeRJXpkmtfJ50XIAU2xS/lPD
|
||||
ZtiBmwxnqk6Bp4kCHAQSAQIABgUCVMloZgAKCRCBxcxPiDKaHBTKD/0SVqbcdpdq
|
||||
mmiVta+adRdcEmBxYJFD1oEtpjcHIJBYBptPkIT14jQMO3r7emkzCzGrXsM6t4t+
|
||||
FvayGjY9VDbO1RWBFc2M1qwBpQYjIJJrx6ZjJjSKB+bDnOg/Kc8WoDmpbx4x112m
|
||||
0Qoeq/AuhV9o6UJsGFU/5RddAERqEDFufTBhYVHBD3xlUxVoEhI+YYCry4bw2I9y
|
||||
6i63krIirlVO8lPiuZDGO6u8E/vIxrec3aFf/fTo2yTqP2u4JiAxxnriZ0rjUUVd
|
||||
w5bWkcpNDdhUHKAfequd/vgXTV8AzU1QN3ilXK46U1yXMTJaXJg72hfypSKXLRAy
|
||||
AOdGkzZZC70SzJz05RTHqDclEUnN/BzuXN+XIYqB/M3ftgrP0BxsiPiQZU2TjqHV
|
||||
AeGpSgekih8DK1qcuPiX0sRfg1BFtFUMI/8FxKYvgdxyhmCNEDbs6RBmdar2qtFY
|
||||
u0qpXMim2br1Bgc/0XSngBYko5jjkFG5tnHm5hUM2Da/Clj7eNpJ1fj0DHrfdmmj
|
||||
VVoYD6e2fznL6VAAUGQT3ISbwUa/kLKPrqkLy0b5Vxg/q0Z4tfzWcYJ1YdXBzewo
|
||||
hSCTpRPmwaMyfsbQ8eWTgJR+alif25tfc0A57n1saHKjb93pPA7jNosjpr6W4tUR
|
||||
IuAEm4Q4zEAxZkTWMft2yIvNCiRJwtpGVYhGBBMRAgAGBQJSUrSEAAoJECkMEkm9
|
||||
2HALgkYAoL9Hez9mLtUeiYsv27TT9fL4mE+RAKCGNS3OO0mBVDAOxcMhRV+lkgG+
|
||||
WIheBBARCAAGBQJVYgtfAAoJEH19Eb9inVpnerEBAJ0wIuWRlKqtEtCKOVEboLMD
|
||||
q/0cBBYfGzu5yTlFjnDZAP4rNy5hiL5mEu5GJqGEY0o9wXNLzJ3bw+kNimI6dy9X
|
||||
A4kBHAQQAQIABgUCVcQyrgAKCRDHXurc0X7YRErCB/4uDl6B5/rymPi/3AK3LMyJ
|
||||
bLqZZzErK917s491J+zelFywOoUEWdH+xvUzEOonioTvKkGrQ5Tooy3+7cHojW2q
|
||||
SauLh+rG+b+73TZJyRSYDD4nwWz3/Wlg21BLinQioaNTgj0pb5Hm70NwQwUcFtvy
|
||||
JNw/LJ9mfQaxt//OFSF2TRpBMr5MMhs5vd85G5hGHydZw9v0sLRglk5IzkcxNdku
|
||||
WEG+MpCNBTJs3rkSzCmYSczS1aand3l/3KAwtkau/ru9wtBftrqsbLJZ8Nmv6Ud4
|
||||
4nKTF0dsj5hZaLrGbL5oeMfkEuYEZYSXl0CMtIg0wA9OCvk3ZjutMy0+8calRF87
|
||||
iQEcBBABAgAGBQJWc8vRAAoJELPrw2Cr+RQDqw4H/2Oj/o3ApVt42KmxOXC5Mcua
|
||||
aINf3BrTwK0HDzIP+PSmRd3APVVku0Xv89obY/7l4YakI2UTzyvx5hvjRTM5jEOq
|
||||
m4bd0E1atLo5UIzGtSdgTnPeAbH07beW4UHSG1FCWw35CwYtdyXm9qri9ppWlPKm
|
||||
Hc91PIwGJTfSoIfWUT6MnCSaPjCed3peTXj4FpW1JeOjDtE3yR8gvmIdIfrI4a8Y
|
||||
6CGYAUIdVWawNifLahEZjYS2rFcGCssjBSaWR25McL7m8lb/ChpoqpvQry3MaJXo
|
||||
eOFE7X1zInPda9vDdWR4QFrLDN8JjxzBzwsQcfaA+ypv95SlD3qL6vFpHGHZ4/6J
|
||||
ARwEEAECAAYFAlZ1TPMACgkQGMawXRQNVOhCaQf/aQZ0xEVW+iBuqXzd65axP3yW
|
||||
S9dM//h9psP/UKhFzfxCdn3XzmJ92J0sv22DjR8AbbGLP/H9CeZY8nCQnYOHp+GQ
|
||||
ikGJNjzyd1Zni+Ph67EYfEV2eqRO55GGmiRtUrZaur2pfnbNsvTQtA2rGXen5tLS
|
||||
sCh4qDNHrM1TlP9MSV0clzoVWRrRNvkODrSDaCdEEDrOqfy0AEFlLmBTqSsduo4c
|
||||
O46j0ruC0SvflYx+2HN3rVtZzt1wrhaPBPnV6gP7dhKp9XM4erWV40dP14YyDExZ
|
||||
oKNys7Kq7pnRQMbE3HL6UGa8VPvu9eiELs7kw01pYBtYl1my9ekminj8cygpdYkB
|
||||
HAQQAQgABgUCVolllwAKCRAjRRsQeqA5QYnjB/9oDZYh20qEpGIZRSmur8M/cGFK
|
||||
J6IMxBHFIz73PM+hHB3v28aYRW0lXGu8BNGZVxkTuTjd1HlSFMCNpcNfbMmRhEGt
|
||||
Ep3qGq+cq7zu72lVEiY8tJliq9zyOm+guFzUQ00pvaXuTUFlshvwlRS+GIGn8U2P
|
||||
/SVRGqSOqCkidp4f06yElt5QifwzvHT8KvxjPgFA5NfQAXE5i/IoepV53XDhECqO
|
||||
vsORbc0JT8n8/4hT8qHTno8UNbYK5BQjHlby92v7ZFVgI86Li2zb0HgQSmvpU/qR
|
||||
ibSzg0gEUrWwUR4knTkoKYQwjry2bQ653oNgv0OsnSGEroYOyQ1Q96jOMFKViQEc
|
||||
BBABCAAGBQJWxLxwAAoJENnYUJL2kkSzPbcH/jl1mYhR4f25pRe1InyR7BJF83YD
|
||||
hJYIhbBCGqGVenFEy29hco832HkhMUukaos34KZjsWGDFX1IWe6cxOJvBZsDYHua
|
||||
LCueh5I8/Tmtq+HuebuF0RJtJh7ItJoCrEv7ZyUQmbJ+aHLx2pXSqYUIiWlPvIlG
|
||||
2/esQlUo7pOub7eEb8U3oKWYgs9HkytMeHSTKiuFJ7mzEyh2fLcgsc2q1XT4Vxuq
|
||||
ksWxYv8MstTOxrltQ7LyP2QH/BzfqI5yE3UfSSg1sZE2Nh2cIFNWTYVxdx1fBJWG
|
||||
tTT7l2o99mYwufSLz1UTbGF5PcXeK3sYxN5IJta2FUByaJAWPJonRnojinyJARwE
|
||||
EAEIAAYFAlf7Qx8ACgkQo/9aebCRiCSTowf+Jm7U7n83AR4MriM1ehGg+QfX9kB3
|
||||
jsG1OXgKRpGPIORqxLAniMFGQKP/pqeg2X530HctqjpV+ALG4Ass/kNn4exu5se2
|
||||
KuThQMKLK7h7kfqCnrC8ObeCM7X70ny80b2h+749xWZtahpTuQwVrhcAikgPfS2n
|
||||
XSKdubOyeBH3y0kT2zAoml0MOQsUb6yGycjdnbFrKvfINKfuZvF+z16YOu3eYZ3N
|
||||
O6dErWQ5iTecuNe0nnn30D8+nWA5JfCxNDPfc0e85dm6xK6GTPdaQd5hpF14TdYZ
|
||||
u5eT34BXJcmL5hJ6MzM+OFn5CIn2Xa6r6h9AOp5C0o15Qb6SXpUdZrV/34kBHAQQ
|
||||
AQgABgUCWCj2AQAKCRABFQplW72BAiXGCACSHG54fSeKZysDiX7yUnaUeDf2szdv
|
||||
egD+OPSVJQhcDdhyC/YnipEN4XFpeIkpxUrBXWYyy5B/ymzDQl95O8vI6TnDpUa+
|
||||
bvpkWEAlBK2DuElRojXfPo35ABu0IetQ9xyR+3IzaepHL7Ekf0n0H9vFTmeyYUc3
|
||||
B1m7RDwnUJuAlWRt1qQHmOejkzTDBZALeg+BJ5PtnWqCr29+JZB8cwUJ3Ca8Ypbi
|
||||
CrXWYHu3jlXDDyEhQ73t5OlruOMiYp+opmRySu4rF2d9yJIXnq6uf0WNb6G6JzlV
|
||||
MOqHKvtmrnwXb9zlFTSXb/NkxNmbYPrTvKmSr09YDC/p9iRkuDSeI/OEiQEcBBAB
|
||||
CgAGBQJWlDXmAAoJEISlRGJ0Rpv+6/AIAJGPLDwkeCSkBIGwkg5Mtrlc3PNkGsX2
|
||||
hb2GP6CUiOeF/UAYU9HcxLv62nK/2qY8o96XY5D/CDOTMmvfr/S2Siyp3u6SVDbE
|
||||
oj1KX7nTzItfWdk1t/uxfC0+d1zQC0tyJ5O/DHQBDabsZ9REZDqKjhTimilFIWlu
|
||||
Gov3Hdaa8xkEij9f05REarOBNviaYUxoy9i5Vfo6Uh8jA9XaXw+mS5RIrssa/KlF
|
||||
fh02wXH5xlExHeepo4g79nFD+lmnE5T9PhfjRnBtogCV3ZBehApS8hJze9JfLnex
|
||||
7l1DGSPp6ydIyqoWHbk8VYiPMPfHMSlXpaeuprfq8xdBhqMT2a6Fp+KJARwEEgEC
|
||||
AAYFAlSakYMACgkQlARpDCzjZAx4FAf9GP3vrIvZdZisDqcOoRmKl8iWkY5X3lmx
|
||||
e5BaQ4qjQ6aUvxsopqLN4ETLTbp8oH9c3sTyshQA0BMtdJFst/ZjhDE9pU90Kel9
|
||||
CMbEgq0I5FE5A+348Ovmobe0TUPn2WClwyRGPCe4X0WMEikEHs3Bb1CFzYfbbIe0
|
||||
N1M/DqjUvfKv0lc325P7i2DlbDuUoLmNMgHHx6+jFqsxlNCobkq+IrhKLxv27/K3
|
||||
13UOzECiPRIbMhHmLHQic9MeJp0bzJiTo1icQVRnim5ZovcpXW2piJQaWqx/TUXG
|
||||
aRdCjYrJJJZObIi6qnSB7SjdxwJUq6GuTEb/BJElQFnjsxySvTu24YkCGwQQAQIA
|
||||
BgUCUVSNVAAKCRB+fTNcWi1ewX4xD/d0R2OHFLo42KJPsIc9Wz3AMO7mfpbCmSXc
|
||||
xoM+Cyd9/GT2qgAt9hgItv3iqg9dj+AbjPNUKfpGG4Q4D/x/tb018C3F4U1PLC/P
|
||||
Q2lYX0csvuv3Gp5MuNpCuHS5bW4kLyOpRZh1JrqniL8K1Mp8cdBhMf6H+ZckQuXS
|
||||
hGHwOhGyBMu3X7biXikSvdgQmbDQMtaDbxuYZ+JGXF0uacPVnlAUwW1F55IIhmUH
|
||||
IV7t+poYo/8M0HJ/lB9y5auamrJT4acsPWS+fYHAjfGfpSE7T7QWuiIKJ2EmpVa5
|
||||
hpGhzII9ahF0wtHTKkF7d7RYV1p1UUA5nu8QFTope8fyERJDZg88ICt+TpXJ7+PJ
|
||||
9THcXgNI+papKy2wKHPfly6B+071BA4n0UX0tV7zqWk9axoN+nyUL97/k572kLTb
|
||||
xahrBEYXphdNeqqXHa/udWpTYaKwSGYmIohTSIqBZh7Xa/rhLsx2UfgR5B0WW34E
|
||||
8cTzuiZz////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////
|
||||
////////iQIcBBABAgAGBQJW5/QxAAoJEPvqMRCoU3iU3SkP+wRdT8z3EczONAcv
|
||||
Jsu7ZHgh1ggzsmozTciSuaAZRfvFmUyB9h63cKNTS86CIrqHmMZrtHRu9llkNNiE
|
||||
4Nj8JAAsMPSR4YaKHfHxc3bOH0iWtcPxtIiQEwYs/7oP0/YzFAxcUmZBDeLvy7aK
|
||||
pFqdPUcEhMTWmscVajjJXv+6G8IZwYGFAFvSkYSimZP102gmgKQhcfPDqmlqy78F
|
||||
t+T5MfIha1Q950iZyAM3j46lVWMkBaKPQKq1G3kKaL7Sy3o75y4N7lgzY5WfYnBY
|
||||
VAU8eUjv408FoFKAYFTsA3RG7P2VROoNefPaLRSgEgZPR6efVux9Z3R4zOUQuljv
|
||||
q8r00zMS0t5RVcDp1gCNZQ9xv2QeN/ZDld0U0IbDQRrlT15+l3SthkXapMMvbSVK
|
||||
EILMgaL+ysl7raMW/Zqv1KN2ByVJsPjWnwWCPnn0fMFWr15ExzfZBUNh2rZlQ56j
|
||||
BsJanHF69Th0vI7JNm7/Gd5FRWL8RcXzAL/UbVDuyGaO2JPztQ2dL1lnHVL5mgOM
|
||||
js90YpADenNR5XkQxuazTRiQIOXfoZhgPwe99S9vEdYM6UPYZjt8uo1bmFEkV0CG
|
||||
jWngJc2ySSurftXPFJ7gzFhDbx70Ga/1lw/4H2RPs9ZiZKKTtiGcDLhDxSuX5z3M
|
||||
gzzD3CNp7uKJQlTIg4aFeX9JWQvUiQIcBBABCAAGBQJX+0LWAAoJEAJ4If97HP7G
|
||||
ahAQAMxf3Nyab2t+xJlFR+/ZCvqMq5rM8iq67ZK5fLG000RjLiBN5bd6BglAq03l
|
||||
2DuE3b9hdnosKfU3FCeysivn0af0kxjMaH+W+9JSQJ9E5EjO+RgIJDkn3n6X/lQj
|
||||
Vl3N7R6FeaWY6Ug9paSCtAlVlwCfg/rn2jFIiHQb++44nQFpaX4WuNzZWoy1SOGg
|
||||
32e624fjsgqB0aH2cmY3oGdMFt8FGuzOfa89JGW8P7mUeZsiQQRxR4y+L7omQ60r
|
||||
lveKZeEo/ZVfSZUVtzM9wplXpUMbF6/XtUC9dmsVrSZePrsAHnjjbbk0GBKit2Us
|
||||
wC8fKdHVz9YiWKuM4QLEWiucYLkcWcHUFyp1Tk9ZeS3R3yPASC4eWV72IVGS0mjj
|
||||
olcFwatMfYghQ42+sR+G6duEcJSN7sqrdzYxRny7aYz7GFXv1GCEiz/CzhepHDRO
|
||||
pu9KZv6xetyP4xmaunanzzrd7kM23530jFRK53GJ/4p6XlwYA3jNsxaGoAADOTIw
|
||||
qolgxtvdrNwEeX0pNpFI85BXSJrvBxKseL4o2NlxxvkyrLPIuuU6EfnOgMtu5v1j
|
||||
gLkA3ON3eERxl7DM1I2bqFT2+Fpvsme6KFm1o4DepsO4wL9ZKmqUMZs6AxfmUopi
|
||||
a93EtsZs801vNNUBmSsh3pvIyXGc/v3v2LJY236rsf0DmticiQIcBBABCgAGBQJV
|
||||
fZS1AAoJEFuCGoE7lKfEYBsP+gOUOmmHg0c09v/iPkel7JJGcNnipk4z8xl5nTxX
|
||||
ay4nTY6TKtelOhQUBqDHBqdOe8PNWVutXqSDQKyzRPvXJRYgF2i3IUHq/GtCK2yP
|
||||
aGV7XnYfEvddXmjAlYS9LkHcYH7zp7vLMW/8HgZ0JjeHAfmNF5+Q62rkDUMVBnSR
|
||||
VlA+1mc3/o1O5p/Kn1Tt47kCkLJUMNyBxXl9BnbqJtFWKzoqgMovr2QEIZeUQzlJ
|
||||
KygexnU4tCP5q5VefVqaVnEHkluXJq9knYK/G3c2Pet/GEDe5FkukzouQvcqGauj
|
||||
jvc/pmT7VISkeO4YXvmfctOpggJ9J/ohxg4RgvqaRYdGoFgnNQMEnFLIxd5+8Sb4
|
||||
8mskS59rVwwOllWsbR+6T/ZDW8FYmpNzzuK7Af/JoOcWy7/j0fwOhJa4qX5aKgph
|
||||
5S/rE9pvhmhbkgZta5m8GQ9bHInQnbefud5axRtSyx4cG1ZB/mRLFD7+kkVfW/Kr
|
||||
tdP/7PuuYtIP/nEhs9HnwOmcoRI1WpDGERC6eUc+Dgc5sFD16tvp+2PW8/EBAWQK
|
||||
55b9jZ4Uws0D/3Tn8BE0CP1lJCZzIzKqbO4+VhWNq0eJgwZWTUNoXQuFP1gOhJT+
|
||||
yqtxBRBP9YAOg+bO5kdjqS9IinbbYoaMkY8rUmqrF5r5XNob9mJzgF522npjWOx4
|
||||
P+7KiQIcBBABCgAGBQJZtcGvAAoJEGKrbC2pNmtMIVgP/0eNCkI5HX643HQs3G9x
|
||||
Gg8OmyO0Kk5wv0T1BIAwPjA2tzz3iNEmVMDac8/3qeKCfOyEhdJpqvZxRZ8BKoOk
|
||||
mnIvbwdxPBow8ixdWGLN3ZIeRJL/c9/oxElQ35qyVmCVEkvSKFvpQAG5mvxq4usM
|
||||
RBeol/f7VSsKR7kqU40GamW1q8ExoLkAmnQAHfHx8dZmMBBG4tgVvSGwP0gpKByd
|
||||
EI6xtJXGexL6JumvHmmAAnImGQOL+cfv8oaVp9vXRFwrUZsx5ObGXtV4xeGTr3nd
|
||||
+ZvCoocK6AHXcZiLF3XsnkoAUh7IkTsFPMjQ9w3lb/E8MPjfLrIbw0WJYyNk4VoM
|
||||
ePFYfWjGMU6zVRKwdurV1ndiSC4rZlapqfro78+u8pDoijNpzFsvmy4Y89w80N5l
|
||||
5qyMZ6PMOoZo+iH5hvxITXCtCJHs0QaNzvu8PZSG5Gb4hVn+NcjHUfqulNxTIsyf
|
||||
ISyvbdgQxEmFxSXeHPoMOhvaZn0niWL9JRAAXyM1urOhPG3mo5sqGPpQu1/DbbkA
|
||||
2oo02Uw/Ngh7MP7ujRhwsnC0BQOEgshkeEzACJ3FwB/HbZ1bd0eMjhhcMPwT4lbF
|
||||
QFadcFEhBSd96g93xpeLIIVw9+O447MtA8GHHmng+TE7QWFXL/CUu+n8l7IQtlBS
|
||||
t1KMktSgWEqs6LSvsySDMIETiQIcBBABCgAGBQJZ6mC5AAoJEKhbOua8Odf3rvIP
|
||||
/iiehjNNyKMkzELw7xLRXbQ7AXesG+BKkVXBFZ4ertW6B1ovIkfDmM63Xv3xTQDC
|
||||
Wjf/AewDSEF06k3TpV8P1a/Weu5ESnigHah801dk3GoSNs0CWRSLmZEMwRnyCK96
|
||||
8PlZUdIdEr80SCy0pijFtuI2h81GbLZl5ic09jSXu2up+IxMb5w/cF7EeHNbyFtd
|
||||
n6WNnYCCWPM442eTpm1241+DCw17MvuOyyUSH23bBc9VePe3VsBXS0aNAJhZVrAu
|
||||
Y3UWFEdnVcwmN0QIO4qTqxApT1jaMjvaP5O7TQ0O1X6nReJ4217Dlb/Vj3FzVZl2
|
||||
f/BLjlQae0kBD/2p8waX8R7KSIvzaWJxtUWroOOgzlZgkzj1coD0PK0yysgM0Kzo
|
||||
HEJFZcFz2Khde5SbbTz3iWE0KQgLiBuT0MVxRWrJcWq1b4cFeCr6C10ppmiTWqMl
|
||||
kWFczhXWZu+83b1uMeV1iXZGC0ldJTdscO8O4o9IXdhjr8BiLm7qsGuGJCtWZID8
|
||||
+5GlY+A09rDmwh2Kr5R/aBzQ+JPmzbNYvVmqAvMbYnl1IDowxWv0w6kduvMfTbUB
|
||||
6UkM/zfsbl4PccxlPXO1yPsiFe+f/HIJMcM0aFGqjxY3SmVtKcDXqy7w7Q3uTiy0
|
||||
u9MCqXCdpJRlDoMauM65Vcc/i3fR/MZdqPWcHcL8zKjSiQIcBBMBAgAGBQJWOIXX
|
||||
AAoJEE8/UHhsQB3OlqIP/3lofZqqiV+uoiTdV91Tjmij9Rioz0kohpQsm/tau6JK
|
||||
XItjG7DaG3XPL6NPckNGI+twD393Hdb/VkqatbpxLeJUQLoCjV3M02p6zDJHQ5wP
|
||||
iXgC/8HZVdcP2jlvnrkg4N5dpLJJK4wpZ/KXMsw/SrBj047ZnySIl5qw9ytXrQm5
|
||||
8R7FBB/ANjENvo9C3LEsaDAKv0TL4vyMpz52TjUfgoz68g31Sl6KKOw1HG+dUB69
|
||||
M7MARSVEgaWUOm33eM12QQtCTndJQDg+LeYjfvfHbcnMZnniCZR7rHGxAhBzgKQq
|
||||
JU/JizfZ4FDcBkABhsUQgkSeg3llFVzSU1iofT37A5cbQr0xUShPQwKgkESryuyL
|
||||
059neVsAhDY/hFeyWCKtVQ12i3H7cvzRlfYxD8c/mN5TDiC70Cft1pcLU++u/6Ga
|
||||
1kuzA7rkfoUocrCSjqb9FwLBokWcwbi7SyA8YD5m7W8sPINx7reokK7mvDsbOxpB
|
||||
p/y/yT5ZpTjK3/MNgESrq2N+Qg9EFC4Srlg8wzovn0zamzb2xDJpLfrV/t2DsFrV
|
||||
f2SWFd/YMjkljOLQhbsEpQIdrfS8/hNGgfoUIiko8lqNi50sGQ7kO9kirmjCZaAu
|
||||
OaOi8U0K1C9RvVGTN3oGrxzRRXeqt2Z3bBqs5Lz5lrCNkerWZYXcItIyZ415i/Fs
|
||||
iQQcBBABCAAGBQJYBmzwAAoJEHpjgJ3lEnYizrYf/izSP1V5KJewPvWd6nSHcqjA
|
||||
N82KgKtUaFdUs8ZObqr1cLluzc4jgV6+4YMdySN5vlJWi6LxSwsFn2Y+BNHkRphr
|
||||
OI4vNlevtZ3MywV46BExX1rDSjzovVR74uDOfwgXp3ovCa1cIZVTuiJUKGzuIpNP
|
||||
RJwfRM7o6qqFaTDAEULYJ9zKN2MYbIE1AgvwO4jvG0AtNsBU8qyG45oaZiAiQ3a/
|
||||
pHftfKg4CT2Yd9Zva2FcBYGhEFPG0LSoH/+bil9QqIW6hehyTSLDZGyBVpdANBCv
|
||||
Af5jz2gWC1eW20gsISDVqNzQtqWTIZbU0D+rmyNWve50Y/bvrLYP1g/1ZSAoMSFI
|
||||
cd4msBr4yFePXzzNW/ccMXGsaLINtTq1aYwnGBaDEFILA88LDGc9S/hf1Ldkfyg9
|
||||
0oVxPshbvofWVSBcfrc3fU7en/AKR28PTHAC9o5XaLiYD6n2aCvspdz83Q4CUrxe
|
||||
ELCDQRmZonDcMxLwYGsY+T7mwW8uhQYTK7HeaB5+Uu8gGgPMBpWZJXoci4TeAu/7
|
||||
GZorCBmrX1SSWDz9IdDX27X2fdKNvGmqWasAgOUdr14P6Aa3uaRffg/eSqXUVx2Z
|
||||
SE33iIDeG0+boX7nMNgkco1g1Hy0ZIfp+IKUYrm+VqvJanKxT/fL+LZsjZYLnz3v
|
||||
UGTQNcEiNvv1pTeFTWV43+eDtAFnUrTOhG2a2pEgQf64mOpr+DM3IdWhFRdMDSUp
|
||||
ksNaVq9UxAxr1Hdag6eCgaml+d0tHjjacpBh56WOan5udUKMC5apjUD+BIbZg6tr
|
||||
YhU7yEfOTCclGhPgQyAzq5qYu8PcTg1y++E8eBRnC90qj8Ae43VBG+WagAmVcE7G
|
||||
9KREU7l8jdUtb1sY8/MJOZN2FBP3i2l8SL4Em1JMQd/5HfQmIZ9ufR4r6X7k9q+k
|
||||
onkHvcFDkHUPS8myoyi32+R++yOfHqvckdym6oUHHX8VffT/9cfPZ1pL/Wf4REtt
|
||||
65bBitaDA0Yicg/05PKLQPFn32tp5DcMy1T0ZvkyXfSaZQNrv0Tzv+/Qn6mtkVN0
|
||||
MH9BklOKgES0fERCdikujbIPNI97NjY9Dh6epPkATzKNhYvA3XtvUiTQffcexn/v
|
||||
0HbTv0LVPI1eWvo1TvWZ2ObrEaWIPYelDlJR8MbVi+wMOPKDMtp1TLwxhRnMe9hF
|
||||
qE16fTV/otD89t+RsX9wuG+PfL0DEfwjgNnNCXMImCtRRSkgxTleGhafVF1nj9ac
|
||||
mYdu4gwwjvmV9AK627e8va4cFxBHdjthbSMhiDWu0HRwyS3L++Sl/6G7X384o6fA
|
||||
xku/LiFbfhJ5chHXKw59Hfl0kzPBzCVv8ozWnlfZ+P4yB6zDKVnn37dbbnuUxQ6I
|
||||
XgQQFggABgUCWl5mOwAKCRAbuJwGAjZ0SXlRAP4t6mSiQJrMgGQ0WdmtodwIRKBc
|
||||
Nbl/x/52k7FlWjlnSwD/UWQ/vQPozDkdtG55shknoxrnojv4eODalVKz68nTnQeJ
|
||||
ARwEEAECAAYFAk93ElwACgkQw/arJTtbsFxzLwgAlK9u7pGTBW1POc1ca0YVepWw
|
||||
I//IkwCBTaWEswCXrK9QyT0itHIpmWjHEV4E5upDe6t0tCpd4MgmaGsijGLHky/Z
|
||||
W5JQnu+P0bFOz7Dq+V288dzgHMlZHxgAtOeB/JRREy4ldXoHGx5e92rZaE551Km0
|
||||
uAYoWBkBDEb8txTOUsRLfYfUiwQeeFSFuaLzKutHuxOLYoPlcFQl/pwN4RvAFBB3
|
||||
QwOuvSg857vAslI20htiPSFcBC6DkB7MmuHR1a8GokhnGb0cZOwxz52emBZqZW9w
|
||||
Exd1fG0pq75fEF+vfnNUUPKU25QuvyGPhma04oogsJPsEI1DkemRVNceu7aTBokB
|
||||
MwQQAQgAHRYhBCBZ45m5ND49iWNTUvFOWAEoAwsZBQJan/mIAAoJEPFOWAEoAwsZ
|
||||
FkcH/RRwfRTdhhVzYTxka4LUs336LOXHMVxhSrs5jaCc3HkDaXnFm7FrswhuYDTi
|
||||
pUToE80bCFffITavCVoZVYhB6vnzlMLe5u6Zz0UpgxiFvsgKOMBxrKoDtGOvb4sO
|
||||
ukceKxvoNgA3Y6hX6OSrkta0DsnheTDCSj4/Erzy8VnH456XQ4Ozjp8ybRuRT74k
|
||||
npLQ3OpDGnO+yJxdlrLSwcpIcaXYbaGEJPLmHSqMQ0FjKjQxIdqSZAChCzJx5fPf
|
||||
LojU4C6oDkKDQAulFlSEw71B6qKvriNdmVusdpsFQxViEJ01LJ4RJzyJTP81B4NA
|
||||
bk5lL+f/cel71nySZB4rPGBAV12JAhwEEAEIAAYFAlsdRVcACgkQwhhSWBn3hFF0
|
||||
sQ/+Ol60swz3npgkmQFvMAvOZcW7HcqXfP35gD+ReBkLo0M1Ei0GezFSU4WQFpNK
|
||||
++r7XxEYgOvlK3f5wuNmec4ahHRhj4pwATOU4zQYyvXXw7oF36nrUKqkDehXQESt
|
||||
XeOZR7bzc4HDqrX7YeUMwC/VbXGlGEZvRSkFLY69dCfMAdLmGqRLCcH2izlSK1q5
|
||||
3+TWTG9L8iSUCJ1veezHoJAO+XHcG/FnxZRYPPi6qsCg7KvnHDYb3NVmBtpXy3uL
|
||||
mYd6CiJ7WZBaOjWRV6xnXpu4qh6Kt7Tx4hxsVg0FxBF5PDpPO6cc4mhKDh9Jc+GP
|
||||
eDw+Mki7De5I9tHVxXwPJHC0tcSiC6WcLYv4keHaDs8N6cqY20/alkHJADukzsI8
|
||||
NkCxLQgh5oKzafaQXQjibrUue3HXtddPuTk/kmX34vsbAZbPu/HG2+xySklXotPx
|
||||
imEFaA8D9NgjW8GwcNUl19oFYpUT5SylEkgCEM8iwkc3Dj5j6tsPOxrFcZztBOym
|
||||
RZJEt8oCQEtxL/Ensc8NYK7s0xXqnynCFvMVDngbJQ9siQaGwyu7obpxEw6IHWkH
|
||||
lc3IxVaZKocpLFpN8QR2jJLiCK7WHb9YtnEuwk4q7WezUGxWbE0Q7Bfo64EKrwky
|
||||
5oirsQ6T/5ez1MltcNNDQa9+c0y9NmithivJJHfEIn2O7uuJAjMEEAEKAB0WIQTE
|
||||
H8IbJrqdmqrRrrdqNUoiHvvuqAUCWszMpgAKCRBqNUoiHvvuqNE8D/41X8a9x54+
|
||||
QqPEcqxSwU/mv1pyYwFa2DIN12/eZ7es3bBNHWKdSOL97M/Gtc4GUrFQL7oIrUC7
|
||||
fC5CwQ1HLa+piu1ZL/JzfVyHO4DhiiWkWPLwGVGW6htkk6hP1Nh5WcRxliEEwpXQ
|
||||
emgRdKBv65xr52choVKAxeL+pdh8zSDUg4txH7ABb6m0HNjQpKnGSqepyavAk+Ix
|
||||
u3ATENxjRwCMd2XfkwxIV7XYpl1JPhkZJxpenO8H3kk96ILqSo9dprrVuBQm14ba
|
||||
fzkJnQ715Jle3ZBLJpBqmXw8uQjZybsLubXars6oTa+s4gAOdLYpNmEjsmHqkllu
|
||||
+5i/GhzS7Vqh+ZXQh5hxaYTl9PQeN/wDD4reXsMQEBCz8RfLFnolSiZMkRBEzyVL
|
||||
uJjA+24XRDpzofkeyaknz7MifJ6p/iLB2a27VhaiFPywiNg0fNZKtpBJd68nQH5K
|
||||
8RGOxlTdGicVuh1AG0Qk1L8tn0kzpE5H9cJcXCtcX9fvZI3q3BmOwyG4oS/4rAk3
|
||||
KGw5Tm4zhNV/7VoWZR4xIEgV8U6O0J7InpuZ6qkGGZ7qAWjGBLfbqlIm8t/wfvqX
|
||||
gJ5kALPFK1eegNv9EW5wgf/wYu0f90LOVu/0C13zXf6jhKv1YsPY785qA1cOAyJC
|
||||
7eP75FcHVV8xdWesbLgHAV2+S55Hl3zlD4kBUwQTAQIAPQIbAwYLCQgHAwIEFQII
|
||||
AwQWAgMBAh4BAheAFiEEo8Tw+XnKoizbqPUS7oy8noht3YkFAltn6jwFCRhLy9EA
|
||||
CgkQ7oy8noht3YkhfAf+L/XXwlc/4k/sWL3A4Kxe2LejqrrfSGdzo6A9JQTkwuGz
|
||||
b5t2UbynACNpbYxFlbdlg2zOH2rBx72Yjg4EYSyzPEOmCMvwAO3ekBmreO8UyPV3
|
||||
8b3c6mss9JxTenkKokFtBqsAnUhryykaGlQ8fZs87oXbOtpHZL48DG2TlSiQ2k4j
|
||||
3YjiXnsHlPZpDPfVHrU1wlcxciI3SEPQNUxcRwHXkGtAcXK2P4fmRcDSXcgISh43
|
||||
Dg9ikV3yPLlJuxa887/uQe2ytHNOCgC9GhGyCOfQV09lr7mKpfJmz2YR0xZ+NGd6
|
||||
n5Tvs5GpKwoc30zo9eOQf6TAnQAX6w0NWHhKQEJCFYkBMwQQAQoAHRYhBIOZbqYq
|
||||
gaZcXFp0j2nPQzY7zTQkBQJcP+D4AAoJEGnPQzY7zTQk0TAIAI41zJkJuXpBfASU
|
||||
sr6n2BcXWPvodKDg1mQ+qJNPiLYWPCLqau1eYSR5OFXjoBFL8KiIPY3AGjI5jrn0
|
||||
aOityLm4p0PDgLYZ7VnPX2YPrMgIMIbQ471K8OFf9H2mRJp2bCXEIFQXRA75xrB0
|
||||
T/1TLTL+mz/2YF1oCPHU8ElT1nfFqAx0Nd3XpkhNCxn2K5687+6lG2YWjIXDSY5H
|
||||
Hnl4JFtv4DBz4lyvmSz55r2WYcBSEVvhoTLOILvVbC0eAh1JOPAIls6ARuaOSkRP
|
||||
gx+354QnXsNPIXEP1i11MfIufFsJLIN+5lyLOaMpM/BEB5jSEw7DX2N5t5SkONC/
|
||||
VtTkwIeJAjMEEAEIAB0WIQRHvH3oPUYui+0YqoYSJNvSmaT18wUCXDmNnQAKCRAS
|
||||
JNvSmaT18/i3D/0ThbZLyrhhCCkxeS1AwYsTLKz6tzh26z1wNYM1RGhD0OnyRgI4
|
||||
FZDpwyAtMMS+R3wMC/M16Erx1xa5P2uvvUq8azki/rwVzyixtsZBzsTnnGrUOO72
|
||||
RFIz8HNEhbKvPMfmXkWgR1vVQihMIfU3ca4gMLldxbC6+I6vMY8nEgU5MGy39KbZ
|
||||
z87C8fhtdxQqvKvwqebxMgvuLwf0UX6tR2Jn+gTzX6MCOGNJbIChuresPz1MJ1DB
|
||||
MYsIpSUvOE0pt9wCNmUWHEUMGLSXs5N27kYmrNeR/WM7J/Az510kfhTDgteRZHea
|
||||
lnPHeVqgfaD806Zkhb82Q7MNfu+FYo9tGY0KagEn7zQkrkMeVAJzF0+zXXG25FBZ
|
||||
yS5jRBMICEa1XC5r2EORDwSyP8HZvJaMz2/NeclVaGLNNqIpq02/6O9zvyr1Xoo/
|
||||
ZwkF/n6sMP4zAmRO2NJ/t0aaI0g4ytgJ7dcZqGlVXeYSzYmMKPgtvqYwKRMJ+WmQ
|
||||
GBuLOKEQp+lQLCbx/TRU62T46S4vzQSjITk/Huu010xagbrPhw3o4otMGLiJmIZe
|
||||
YxDosDKpimVagPEHQzmZGkDWnBqTFUyTy5rJp9pO+43ZKkCknB4rOirjxu/idjbW
|
||||
XAWb/7cQDTaSvHlFrEw41F0KrrGwTpLJthE81zgXskBNDMsUPSSArH2Hm4kCOQQS
|
||||
AQoAIxYhBCkQSkbFYVv5eKCD8gwgfwey8ytnBQJbrjRTBYMHPoPpAAoJEAwgfwey
|
||||
8ytnerYQAKVWdjbCDxVgzDiahizkfZFaMPL4c3FCQ1ty4OgppDFMqDMMzlYOV3MW
|
||||
4bflgZddfSzvzAPMGDxeoQ0neBt8nRguKxuw2GiZRsMNfyxE9Bu7sBPwKhur/AIH
|
||||
f7ZPkmntXVgWVJJJM7G5l7r+9VwMpaQCH1sNCkccuOHHPGZrk+rGxRKJN/2g39bt
|
||||
ba0z2Sm3N1lkdQaZTmda1lYZ0XODySrKsisW+9iLDaPddZn2FtjM9/pMCm+ASmeU
|
||||
FboDcre48PKD6BC7gLzX+jDU3afQVJjHRBLMjO0fdJAbgFtlD5fZ8xAoKyKHob5M
|
||||
5uhXiFc/XLpwu4FmZ86/ugDY0hbNb9xwf7g3EczVYeRg5Xqce8stMF0upXf081rm
|
||||
ru6RmsTGuIZu0zhEntRK/f0mDejn+D3xlCqBd4gn8UVzQC3X1IK2S41yOgX9lwO0
|
||||
AMUuNcnA4tlcOVfzTXVM3QZ7Ifr2FSVenrbTwXwPgcF5lKGURhX2wnTi/rdA8HG+
|
||||
cprIZ1Iingn0nacKyJMzIZ0x367Ifm5rPOWHeCZJdtC4B3wIn7da4w62AqopD/T1
|
||||
7F82IbkTdDkonwGhRMEJSCRvIWi08+2Dz0F0Gm5WIV0YZIb3Ca8cXdPy+114ru0q
|
||||
GmqyXjmuTiSU9W/u2KqsRSfgvDWqMRMdSavvI0QTqLI45H3CBRO9iQFTBBMBCgA9
|
||||
AhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AWIQSjxPD5ecqiLNuo9RLujLyeiG3d
|
||||
iQUCX7TTxQUJHJi1WgAKCRDujLyeiG3diVtBB/9+uQeOjXy5EFZrZXXnX2HsdMJX
|
||||
ekP4FHiUMqZ3GA6KM4ypPmnpPfZ9bO+8vg56kVjpt8EzUKme3cs/oqPknoDZXnrA
|
||||
4xlOCOd/oyLSatyAZXlQ5GV5Xr5TAQW2M/Wj2m7vRxO8tHoocmD3sI8/97cpbShg
|
||||
bkyyjJlv0rs695Hws/gsyyxRTPZCtd0HeLBvy4L2ikTubebg9FTIfqq6AIpk/rIl
|
||||
Xh5zio3PapclnrbaWXAHt1dCBiXqAIrDXNlaq6XnMJjXG9CAXtAmK2dbgy57TGgR
|
||||
3JDCH2boYVNp4451ZY6TrGuOG72Dt0KHUhVluEWbm3aYHS4v7L6e2mADRnQYuQEN
|
||||
BEqg7ZABCADa4rFJFIql3Yk7U4NQO7GmlhpxjUmR6bENQQcbfVyoJVO4XPhqU3KX
|
||||
gj7yma1faL5gftb17Du4aCNHM8SNM6bz9nPa5755B6ui966jSHIVr1jcLGE0wITc
|
||||
QfgC592h+4KadR/9btPPIi/N5yvAU+XJmGpaebESq7wVpH6Ncr0mzHZlvL8SKE2g
|
||||
LBA5a12/cjg6LkoFuCXF/ETs+ZiCj0NipOYfGayc+JQTgVhkbbrcuXVmqRvBbvuf
|
||||
AMSXW6H62Ns675jVwrB5xZvJUi5jV4o6fNULzyV1VIrHMo4a7fszLjPrkZMHIxB8
|
||||
wGehn4VkUZiIKJOGP5zyL3cMhHNh46yNABEBAAGJAkQEGAECAA8FAkqg7ZACGwIF
|
||||
CQWjmoABKQkQ7oy8noht3YnAXSAEGQECAAYFAkqg7ZAACgkQdKlBuiGeyBC0EQf5
|
||||
Af/G0/2xz0QwH58N6Cx/ZoMctPbxim+F+MtZWtiZdGJ7G1wFGILAtPqSG6WEDa+T
|
||||
hOeHbZ1uGvzuFS24IlkZHljgTZlL30p8DFdy73pajoqLRfrrkb9DJTGgVhP2axhn
|
||||
OW/Q6Zu4hoQPSn2VGVOVmuwMb3r1r93fQbw0bQy/oIf9J+q2rbp4/chOodd7XMW9
|
||||
5VMwiWIEdpYaD0moeK7+abYzBTG5ADMuZoK2ZrkteQZNQexSu4h0emWerLsMdvcM
|
||||
LyYiOdWP128+s1e/nibHGFPAeRPkQ+MVPMZlrqgVq9i34XPA9HrtxVBd/PuOHoaS
|
||||
1yrGuADspSZTC5on4PMaQqpkCACiHhL07FWUg+W3uRQLnt+jMOqauaPWfJfPrK+V
|
||||
mZZ3Q5KRXgQ1ciwIq9D/GKcnfqVqLeSFGGF3xrt24q9lETQYKdcCQGqkPdmBpYgF
|
||||
eg71c4zviaADtQDtr93/RaGV3gC37r0WV6BRPU7NlZHHlDz/XaUz+NZIEslo/tmZ
|
||||
yV8/yZlaItJI9qefzoA2aBJFHKYdtgLWo7IIAthchxVK8fbpc6Sopp/9K0GvXM/6
|
||||
Ijpu7H0NMVp7PGwuFbtmbwLR3GkyePmQeoMs6T1wn/l06JSIJVbZGcQC72d0KQrX
|
||||
Y5rB2h/PKvrIgmmcvpOwDm4WpSizPas48p54M62u5Kjj3Q9MiQJEBBgBAgAPAhsC
|
||||
BQJQPjNzBQkJX6zhASnAXSAEGQECAAYFAkqg7ZAACgkQdKlBuiGeyBC0EQf5Af/G
|
||||
0/2xz0QwH58N6Cx/ZoMctPbxim+F+MtZWtiZdGJ7G1wFGILAtPqSG6WEDa+ThOeH
|
||||
bZ1uGvzuFS24IlkZHljgTZlL30p8DFdy73pajoqLRfrrkb9DJTGgVhP2axhnOW/Q
|
||||
6Zu4hoQPSn2VGVOVmuwMb3r1r93fQbw0bQy/oIf9J+q2rbp4/chOodd7XMW95VMw
|
||||
iWIEdpYaD0moeK7+abYzBTG5ADMuZoK2ZrkteQZNQexSu4h0emWerLsMdvcMLyYi
|
||||
OdWP128+s1e/nibHGFPAeRPkQ+MVPMZlrqgVq9i34XPA9HrtxVBd/PuOHoaS1yrG
|
||||
uADspSZTC5on4PMaQgkQ7oy8noht3Yn+Nwf/bLfZW9RUqCQAmw1L5QLfMYb3GAIF
|
||||
qx/h34y3MBToEzXqnfSEkZGM1iZtIgO1i3oVOGVlaGaE+wQKhg6zJZ6oTOZ+/ufR
|
||||
O/xdmfGHZdlAfUEau/YiLknElEUNAQdUNuMB9TUtmBvh00aYoOjzRoAentTS+/3p
|
||||
3+iQXK8NPJjQWBNToUVUQiYD9bBCIK/aHhBhmdEc0YfcWyQgd6IL7547BRJbPDju
|
||||
OyAfRWLJ17uJMGYqOFTkputmpG8n0dG0yUcUI4MoA8U79iG83EAd5vTS1eJiTmc+
|
||||
PLBneknviBEBiSRO4Yu5q4QxksOqYhFYBzOj6HXwgJCczVEZUCnuW7kHw4kCRAQY
|
||||
AQIADwIbAgUCVANGwQUJEOcnLwEpwF0gBBkBAgAGBQJKoO2QAAoJEHSpQbohnsgQ
|
||||
tBEH+QH/xtP9sc9EMB+fDegsf2aDHLT28YpvhfjLWVrYmXRiextcBRiCwLT6khul
|
||||
hA2vk4Tnh22dbhr87hUtuCJZGR5Y4E2ZS99KfAxXcu96Wo6Ki0X665G/QyUxoFYT
|
||||
9msYZzlv0OmbuIaED0p9lRlTlZrsDG969a/d30G8NG0Mv6CH/Sfqtq26eP3ITqHX
|
||||
e1zFveVTMIliBHaWGg9JqHiu/mm2MwUxuQAzLmaCtma5LXkGTUHsUruIdHplnqy7
|
||||
DHb3DC8mIjnVj9dvPrNXv54mxxhTwHkT5EPjFTzGZa6oFavYt+FzwPR67cVQXfz7
|
||||
jh6GktcqxrgA7KUmUwuaJ+DzGkIJEO6MvJ6Ibd2JiakIAKqtDaLgc796crcZ0vwQ
|
||||
Glf5+H3OBj/sYkyNAByDdN2ZsuO7M1FT4OZcCBHqKScbeSfJQrqSQscSAURU+fTG
|
||||
xNJrEDk9S975YAXiInRk71XawUNWhEqER5vshyLOx9es5FJo/rw7v253t+vzKElN
|
||||
G3NhDnAe4UOQM73W2YfbWI6cikzwiWxHttO0oHByd/nqxMUP2onXQMI8fRRnRQmQ
|
||||
KEzXZq46TVETp6N3WyBu30gjuz1Twq3QsS9Ga7crrhHk4E33FsU0Lq2GDTsT7+rF
|
||||
xdVTTyCVQU33QEdmZYU6SIxTDllyYF1ooqfJWMtwvwFNW6YElduoCCJZNQJ5zR1Q
|
||||
R/mIXgQQFggABgUCWl5mOwAKCRAbuJwGAjZ0SXlRAP4t6mSiQJrMgGQ0WdmtodwI
|
||||
RKBcNbl/x/52k7FlWjlnSwD/UWQ/vQPozDkdtG55shknoxrnojv4eODalVKz68nT
|
||||
nQeJAlsEGAECACYCGwIWIQSjxPD5ecqiLNuo9RLujLyeiG3diQUCW2fqRQUJFRpo
|
||||
tQEpwF0gBBkBAgAGBQJKoO2QAAoJEHSpQbohnsgQtBEH+QH/xtP9sc9EMB+fDegs
|
||||
f2aDHLT28YpvhfjLWVrYmXRiextcBRiCwLT6khulhA2vk4Tnh22dbhr87hUtuCJZ
|
||||
GR5Y4E2ZS99KfAxXcu96Wo6Ki0X665G/QyUxoFYT9msYZzlv0OmbuIaED0p9lRlT
|
||||
lZrsDG969a/d30G8NG0Mv6CH/Sfqtq26eP3ITqHXe1zFveVTMIliBHaWGg9JqHiu
|
||||
/mm2MwUxuQAzLmaCtma5LXkGTUHsUruIdHplnqy7DHb3DC8mIjnVj9dvPrNXv54m
|
||||
xxhTwHkT5EPjFTzGZa6oFavYt+FzwPR67cVQXfz7jh6GktcqxrgA7KUmUwuaJ+Dz
|
||||
GkIJEO6MvJ6Ibd2JyVcH/3+imOYpKAPY7NjDLswbjrqKKcD8SL5trPd+811ST03U
|
||||
9/PRjoRsYZqGQ9eMg4KN6Rx0lDipTldC7YfqdBP4YidfdsJ/6MDEOVuzUHewWwHr
|
||||
aBVoMI68YG7dD3RMA0/xAqn5QsDEyZHldLEZjq/qXCJAkqqG2th9hnYFlmsvo46v
|
||||
W78+jI0P6MW/qAxiJ5eAvNf0vT1pP4MagOPT8NZ6zYTJNeQPE3kiSN9wFMEYcoJ5
|
||||
SwyfOHQqRrZy96XDBCF3F7BfrgcN0h+IQ4z9BSa8yBxcWfDJiuhgO/Ks2JGsrPBA
|
||||
hOkSUbdpxsb2/MzASgbiN00wsGsEejVHxvX7/iOE3rOJAlsEGAEKACYCGwIWIQSj
|
||||
xPD5ecqiLNuo9RLujLyeiG3diQUCX7TT0gUJGANdQgEpwF0gBBkBAgAGBQJKoO2Q
|
||||
AAoJEHSpQbohnsgQtBEH+QH/xtP9sc9EMB+fDegsf2aDHLT28YpvhfjLWVrYmXRi
|
||||
extcBRiCwLT6khulhA2vk4Tnh22dbhr87hUtuCJZGR5Y4E2ZS99KfAxXcu96Wo6K
|
||||
i0X665G/QyUxoFYT9msYZzlv0OmbuIaED0p9lRlTlZrsDG969a/d30G8NG0Mv6CH
|
||||
/Sfqtq26eP3ITqHXe1zFveVTMIliBHaWGg9JqHiu/mm2MwUxuQAzLmaCtma5LXkG
|
||||
TUHsUruIdHplnqy7DHb3DC8mIjnVj9dvPrNXv54mxxhTwHkT5EPjFTzGZa6oFavY
|
||||
t+FzwPR67cVQXfz7jh6GktcqxrgA7KUmUwuaJ+DzGkIJEO6MvJ6Ibd2J7EMH/2sh
|
||||
bVx9NRS36XNfQl6A1AXLCZ0+o4P+7zD1XsimSv2XsEMGzUxBk1FGao61QkXKuTEz
|
||||
Y16bBE8tu7F0EbV6AyGoBdAqNauDZpJxq5OAHx7Od06R8KKil6T+OGGqPdPeEpgG
|
||||
+i9d4hyDtESPeX+a8HDiIEC0czybPVzqvgtw8zTIpfQdaAMzv0ZPwYoU5mBG7SyP
|
||||
ej5JjJj8Lfy/4LHHMRtwvqEqtNuukzePflnn0BR8UTQTQ9WlisRwUJzBdBJA23zh
|
||||
GsFQ52ZUrxmcd65lC/CqYZEFwK0B8OwSzUxRbgFrCVzsizySv+QWXmi7EHd3bow4
|
||||
keSPmmDrjl8cySCNsMo=
|
||||
=R0uO
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
EOF
|
||||
|
||||
${SUDO} apt-get --quiet update
|
||||
${SUDO} apt-get --quiet --yes install tor deb.torproject.org-keyring
|
|
@ -55,7 +55,7 @@ def i2p_network(reactor, temp_dir, request):
|
|||
proto,
|
||||
which("docker"),
|
||||
(
|
||||
"docker", "run", "-p", "7656:7656", "purplei2p/i2pd",
|
||||
"docker", "run", "-p", "7656:7656", "purplei2p/i2pd:release-2.43.0",
|
||||
# Bad URL for reseeds, so it can't talk to other routers.
|
||||
"--reseed.urls", "http://localhost:1/",
|
||||
),
|
||||
|
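Note: the fixture change above pins the i2pd container to a fixed release tag and points reseeding at an unreachable URL so the test router stays isolated. A minimal standalone sketch of the same invocation, outside the Twisted process machinery the fixture uses (assumes only that the docker CLI is on PATH; this is an illustration, not part of the change):

import subprocess

# Illustration only: run the pinned i2pd image the way the fixture does,
# with an unreachable reseed URL so the router cannot join the real network.
subprocess.run(
    [
        "docker", "run", "-p", "7656:7656", "purplei2p/i2pd:release-2.43.0",
        "--reseed.urls", "http://localhost:1/",
    ],
    check=True,
)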
@ -63,7 +63,7 @@ def i2p_network(reactor, temp_dir, request):
|
|||
|
||||
def cleanup():
|
||||
try:
|
||||
proto.transport.signalProcess("KILL")
|
||||
proto.transport.signalProcess("INT")
|
||||
util.block_with_timeout(proto.exited, reactor)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
|
|
|
@ -19,6 +19,7 @@ from future.utils import PY2
|
|||
if PY2:
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
|
||||
import os.path
|
||||
from posixpath import join
|
||||
from stat import S_ISDIR
|
||||
|
||||
|
@ -33,7 +34,7 @@ import pytest
|
|||
from .util import generate_ssh_key, run_in_thread
|
||||
|
||||
|
||||
def connect_sftp(connect_args={"username": "alice", "password": "password"}):
|
||||
def connect_sftp(connect_args):
|
||||
"""Create an SFTP client."""
|
||||
client = SSHClient()
|
||||
client.set_missing_host_key_policy(AutoAddPolicy)
|
||||
|
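Note: with the mutable default argument removed, every caller of connect_sftp now states its credentials explicitly. A minimal usage sketch, using the "alice-key" account and the sftp_client_key helper introduced later in this diff (illustration only, mirroring what the updated tests below do):

# Illustration: connect as the key-authenticated account used by the tests.
sftp = connect_sftp(connect_args={
    "username": "alice-key",
    "pkey": sftp_client_key(alice),
})
assert sftp.listdir() == []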
@ -60,24 +61,24 @@ def connect_sftp(connect_args={"username": "alice", "password": "password"}):
|
|||
@run_in_thread
|
||||
def test_bad_account_password_ssh_key(alice, tmpdir):
|
||||
"""
|
||||
Can't login with unknown username, wrong password, or wrong SSH pub key.
|
||||
Can't login with unknown username, any password, or wrong SSH pub key.
|
||||
"""
|
||||
# Wrong password, wrong username:
|
||||
for u, p in [("alice", "wrong"), ("someuser", "password")]:
|
||||
# Any password, wrong username:
|
||||
for u, p in [("alice-key", "wrong"), ("someuser", "password")]:
|
||||
with pytest.raises(AuthenticationException):
|
||||
connect_sftp(connect_args={
|
||||
"username": u, "password": p,
|
||||
})
|
||||
|
||||
another_key = join(str(tmpdir), "ssh_key")
|
||||
another_key = os.path.join(str(tmpdir), "ssh_key")
|
||||
generate_ssh_key(another_key)
|
||||
good_key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
|
||||
good_key = RSAKey(filename=os.path.join(alice.node_dir, "private", "ssh_client_rsa_key"))
|
||||
bad_key = RSAKey(filename=another_key)
|
||||
|
||||
# Wrong key:
|
||||
with pytest.raises(AuthenticationException):
|
||||
connect_sftp(connect_args={
|
||||
"username": "alice2", "pkey": bad_key,
|
||||
"username": "alice-key", "pkey": bad_key,
|
||||
})
|
||||
|
||||
# Wrong username:
|
||||
|
@ -86,13 +87,24 @@ def test_bad_account_password_ssh_key(alice, tmpdir):
|
|||
"username": "someoneelse", "pkey": good_key,
|
||||
})
|
||||
|
||||
def sftp_client_key(node):
|
||||
return RSAKey(
|
||||
filename=os.path.join(node.node_dir, "private", "ssh_client_rsa_key"),
|
||||
)
|
||||
|
||||
def test_sftp_client_key_exists(alice, alice_sftp_client_key_path):
|
||||
"""
|
||||
Weakly validate the sftp client key fixture by asserting that *something*
|
||||
exists at the supposed key path.
|
||||
"""
|
||||
assert os.path.exists(alice_sftp_client_key_path)
|
||||
|
||||
@run_in_thread
|
||||
def test_ssh_key_auth(alice):
|
||||
"""It's possible to login authenticating with SSH public key."""
|
||||
key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
|
||||
key = sftp_client_key(alice)
|
||||
sftp = connect_sftp(connect_args={
|
||||
"username": "alice2", "pkey": key
|
||||
"username": "alice-key", "pkey": key
|
||||
})
|
||||
assert sftp.listdir() == []
|
||||
|
||||
|
@ -100,7 +112,10 @@ def test_ssh_key_auth(alice):
|
|||
@run_in_thread
|
||||
def test_read_write_files(alice):
|
||||
"""It's possible to upload and download files."""
|
||||
sftp = connect_sftp()
|
||||
sftp = connect_sftp(connect_args={
|
||||
"username": "alice-key",
|
||||
"pkey": sftp_client_key(alice),
|
||||
})
|
||||
with sftp.file("myfile", "wb") as f:
|
||||
f.write(b"abc")
|
||||
f.write(b"def")
|
||||
|
@ -117,7 +132,10 @@ def test_directories(alice):
|
|||
It's possible to create, list directories, and create and remove files in
|
||||
them.
|
||||
"""
|
||||
sftp = connect_sftp()
|
||||
sftp = connect_sftp(connect_args={
|
||||
"username": "alice-key",
|
||||
"pkey": sftp_client_key(alice),
|
||||
})
|
||||
assert sftp.listdir() == []
|
||||
|
||||
sftp.mkdir("childdir")
|
||||
|
@ -148,7 +166,10 @@ def test_directories(alice):
|
|||
@run_in_thread
|
||||
def test_rename(alice):
|
||||
"""Directories and files can be renamed."""
|
||||
sftp = connect_sftp()
|
||||
sftp = connect_sftp(connect_args={
|
||||
"username": "alice-key",
|
||||
"pkey": sftp_client_key(alice),
|
||||
})
|
||||
sftp.mkdir("dir")
|
||||
|
||||
filepath = join("dir", "file")
|
||||
|
|
|
@ -35,10 +35,16 @@ from allmydata.test.common import (
|
|||
if sys.platform.startswith('win'):
|
||||
pytest.skip('Skipping Tor tests on Windows', allow_module_level=True)
|
||||
|
||||
if PY2:
|
||||
pytest.skip('Skipping Tor tests on Python 2 because dependencies are hard to come by', allow_module_level=True)
|
||||
|
||||
@pytest_twisted.inlineCallbacks
|
||||
def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl):
|
||||
yield _create_anonymous_node(reactor, 'carol', 8008, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
yield _create_anonymous_node(reactor, 'dave', 8009, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
carol = yield _create_anonymous_node(reactor, 'carol', 8008, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
dave = yield _create_anonymous_node(reactor, 'dave', 8009, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
util.await_client_ready(carol, minimum_number_of_servers=2)
|
||||
util.await_client_ready(dave, minimum_number_of_servers=2)
|
||||
|
||||
# ensure both nodes are connected to "a grid" by uploading
|
||||
# something via carol, and retrieve it using dave.
|
||||
gold_path = join(temp_dir, "gold")
|
||||
|
@ -140,5 +146,6 @@ shares.total = 2
|
|||
f.write(node_config)
|
||||
|
||||
print("running")
|
||||
yield util._run_node(reactor, node_dir.path, request, None)
|
||||
result = yield util._run_node(reactor, node_dir.path, request, None)
|
||||
print("okay, launched")
|
||||
return result
|
||||
|
|
|
@ -482,14 +482,15 @@ def web_post(tahoe, uri_fragment, **kwargs):
|
|||
return resp.content
|
||||
|
||||
|
||||
def await_client_ready(tahoe, timeout=10, liveness=60*2):
|
||||
def await_client_ready(tahoe, timeout=10, liveness=60*2, minimum_number_of_servers=1):
|
||||
"""
|
||||
Uses the status API to wait for a client-type node (in `tahoe`, a
|
||||
`TahoeProcess` instance usually from a fixture e.g. `alice`) to be
|
||||
'ready'. A client is deemed ready if:
|
||||
|
||||
- it answers `http://<node_url>/statistics/?t=json/`
|
||||
- there is at least one storage-server connected
|
||||
- there is at least one storage-server connected (configurable via
|
||||
``minimum_number_of_servers``)
|
||||
- every storage-server has a "last_received_data" and it is
|
||||
within the last `liveness` seconds
|
||||
|
||||
|
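Note: as the updated docstring says, readiness now also requires a configurable minimum number of connected storage servers. A short usage sketch matching the tor test earlier in this diff (carol is a TahoeProcess fixture; illustration only):

# Illustration: block until carol answers the status API, sees at least two
# storage servers, and each server reported data within the last 2 minutes.
await_client_ready(carol, timeout=10, liveness=60 * 2, minimum_number_of_servers=2)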
@ -506,8 +507,8 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2):
|
|||
time.sleep(1)
|
||||
continue
|
||||
|
||||
if len(js['servers']) == 0:
|
||||
print("waiting because no servers at all")
|
||||
if len(js['servers']) < minimum_number_of_servers:
|
||||
print("waiting because insufficient servers")
|
||||
time.sleep(1)
|
||||
continue
|
||||
server_times = [
|
||||
|
|
|
@ -26,10 +26,10 @@ python run-deprecations.py [--warnings=STDERRFILE] [--package=PYTHONPACKAGE ] CO
|
|||
class RunPP(protocol.ProcessProtocol):
|
||||
def outReceived(self, data):
|
||||
self.stdout.write(data)
|
||||
sys.stdout.write(data)
|
||||
sys.stdout.write(str(data, sys.stdout.encoding))
|
||||
def errReceived(self, data):
|
||||
self.stderr.write(data)
|
||||
sys.stderr.write(data)
|
||||
sys.stderr.write(str(data, sys.stdout.encoding))
|
||||
def processEnded(self, reason):
|
||||
signal = reason.value.signal
|
||||
rc = reason.value.exitCode
|
||||
|
@ -100,17 +100,19 @@ def run_command(main):
|
|||
|
||||
pp.stdout.seek(0)
|
||||
for line in pp.stdout.readlines():
|
||||
line = str(line, sys.stdout.encoding)
|
||||
if match(line):
|
||||
add(line) # includes newline
|
||||
|
||||
pp.stderr.seek(0)
|
||||
for line in pp.stderr.readlines():
|
||||
line = str(line, sys.stdout.encoding)
|
||||
if match(line):
|
||||
add(line)
|
||||
|
||||
if warnings:
|
||||
if config["warnings"]:
|
||||
with open(config["warnings"], "wb") as f:
|
||||
with open(config["warnings"], "w") as f:
|
||||
print("".join(warnings), file=f)
|
||||
print("ERROR: %d deprecation warnings found" % len(warnings))
|
||||
sys.exit(1)
|
||||
|
|
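Note: these run-deprecations.py hunks are part of the Python 3 port: ProcessProtocol delivers process output as bytes, so it is decoded with sys.stdout.encoding before being written or matched, and the collected warnings are therefore written to a text-mode file. A minimal sketch of the pattern (standalone illustration with placeholder data and path, not the script itself):

import sys

def forward_output(data: bytes) -> None:
    # Twisted hands outReceived/errReceived raw bytes; sys.stdout is a text
    # stream on Python 3, so decode before writing.
    sys.stdout.write(str(data, sys.stdout.encoding))

# After decoding, collected warnings are str, so the report file is opened
# in text mode rather than "wb".
warnings = ["DeprecationWarning: example\n"]        # placeholder data
with open("deprecation-warnings.txt", "w") as f:    # placeholder path
    print("".join(warnings), file=f)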
|
@ -0,0 +1,95 @@
|
|||
#
|
||||
# this updates the (tagged) version of the software
|
||||
#
|
||||
# Any "options" are hard-coded in here (e.g. the GnuPG key to use)
|
||||
#
|
||||
|
||||
author = "meejah <meejah@meejah.ca>"
|
||||
|
||||
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime
|
||||
from packaging.version import Version
|
||||
|
||||
from dulwich.repo import Repo
|
||||
from dulwich.porcelain import (
|
||||
tag_list,
|
||||
tag_create,
|
||||
status,
|
||||
)
|
||||
|
||||
from twisted.internet.task import (
|
||||
react,
|
||||
)
|
||||
from twisted.internet.defer import (
|
||||
ensureDeferred,
|
||||
)
|
||||
|
||||
|
||||
def existing_tags(git):
|
||||
versions = sorted(
|
||||
Version(v.decode("utf8").lstrip("tahoe-lafs-"))
|
||||
for v in tag_list(git)
|
||||
if v.startswith(b"tahoe-lafs-")
|
||||
)
|
||||
return versions
|
||||
|
||||
|
||||
def create_new_version(git):
|
||||
versions = existing_tags(git)
|
||||
biggest = versions[-1]
|
||||
|
||||
return Version(
|
||||
"{}.{}.{}".format(
|
||||
biggest.major,
|
||||
biggest.minor + 1,
|
||||
0,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
async def main(reactor):
|
||||
git = Repo(".")
|
||||
|
||||
st = status(git)
|
||||
if any(st.staged.values()) or st.unstaged:
|
||||
print("unclean checkout; aborting")
|
||||
raise SystemExit(1)
|
||||
|
||||
v = create_new_version(git)
|
||||
if "--no-tag" in sys.argv:
|
||||
print(v)
|
||||
return
|
||||
|
||||
print("Existing tags: {}".format("\n".join(str(x) for x in existing_tags(git))))
|
||||
print("New tag will be {}".format(v))
|
||||
|
||||
# the "tag time" is seconds from the epoch .. we quantize these to
|
||||
# the start of the day in question, in UTC.
|
||||
now = datetime.now()
|
||||
s = now.utctimetuple()
|
||||
ts = int(
|
||||
time.mktime(
|
||||
time.struct_time((s.tm_year, s.tm_mon, s.tm_mday, 0, 0, 0, 0, s.tm_yday, 0))
|
||||
)
|
||||
)
|
||||
tag_create(
|
||||
repo=git,
|
||||
tag="tahoe-lafs-{}".format(str(v)).encode("utf8"),
|
||||
author=author.encode("utf8"),
|
||||
message="Release {}".format(v).encode("utf8"),
|
||||
annotated=True,
|
||||
objectish=b"HEAD",
|
||||
sign=author.encode("utf8"),
|
||||
tag_time=ts,
|
||||
tag_timezone=0,
|
||||
)
|
||||
|
||||
print("Tag created locally, it is not pushed")
|
||||
print("To push it run something like:")
|
||||
print(" git push origin {}".format(v))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
react(lambda r: ensureDeferred(main(r)))
|
|
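Note: the new release script derives the next version by bumping the minor component of the highest existing tahoe-lafs-* tag, and quantizes the tag timestamp to the start of the day in question. A small worked sketch of the version bump performed by create_new_version above (the tag list here is hypothetical):

from packaging.version import Version

# Hypothetical existing release tags, as create_new_version would see them.
existing = sorted([Version("1.16.0"), Version("1.17.0")])
biggest = existing[-1]
next_version = Version("{}.{}.{}".format(biggest.major, biggest.minor + 1, 0))
assert str(next_version) == "1.18.0"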
@ -1,522 +0,0 @@
from __future__ import print_function

import os, shutil, sys, urllib, time, stat, urlparse

# Python 2 compatibility
from future.utils import PY2
if PY2:
    from future.builtins import str  # noqa: F401
from six.moves import cStringIO as StringIO

from twisted.python.filepath import (
    FilePath,
)
from twisted.internet import defer, reactor, protocol, error
from twisted.application import service, internet
from twisted.web import client as tw_client
from twisted.python import log, procutils
from foolscap.api import Tub, fireEventually, flushEventualQueue

from allmydata import client, introducer
from allmydata.immutable import upload
from allmydata.scripts import create_node
from allmydata.util import fileutil, pollmixin
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.encodingutil import get_filesystem_encoding

from allmydata.scripts.common import (
    write_introducer,
)

class StallableHTTPGetterDiscarder(tw_client.HTTPPageGetter, object):
    full_speed_ahead = False
    _bytes_so_far = 0
    stalled = None
    def handleResponsePart(self, data):
        self._bytes_so_far += len(data)
        if not self.factory.do_stall:
            return
        if self.full_speed_ahead:
            return
        if self._bytes_so_far > 1e6+100:
            if not self.stalled:
                print("STALLING")
                self.transport.pauseProducing()
                self.stalled = reactor.callLater(10.0, self._resume_speed)
    def _resume_speed(self):
        print("RESUME SPEED")
        self.stalled = None
        self.full_speed_ahead = True
        self.transport.resumeProducing()
    def handleResponseEnd(self):
        if self.stalled:
            print("CANCEL")
            self.stalled.cancel()
            self.stalled = None
        return tw_client.HTTPPageGetter.handleResponseEnd(self)

class StallableDiscardingHTTPClientFactory(tw_client.HTTPClientFactory, object):
    protocol = StallableHTTPGetterDiscarder

def discardPage(url, stall=False, *args, **kwargs):
    """Start fetching the URL, but stall our pipe after the first 1MB.
    Wait 10 seconds, then resume downloading (and discarding) everything.
    """
    # adapted from twisted.web.client.getPage . We can't just wrap or
    # subclass because it provides no way to override the HTTPClientFactory
    # that it creates.
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    assert scheme == 'http'
    host, port = netloc, 80
    if ":" in host:
        host, port = host.split(":")
        port = int(port)
    factory = StallableDiscardingHTTPClientFactory(url, *args, **kwargs)
    factory.do_stall = stall
    reactor.connectTCP(host, port, factory)
    return factory.deferred

class ChildDidNotStartError(Exception):
    pass

class SystemFramework(pollmixin.PollMixin):
    numnodes = 7

    def __init__(self, basedir, mode):
        self.basedir = basedir = abspath_expanduser_unicode(str(basedir))
        if not (basedir + os.path.sep).startswith(abspath_expanduser_unicode(u".") + os.path.sep):
            raise AssertionError("safety issue: basedir must be a subdir")
        self.testdir = testdir = os.path.join(basedir, "test")
        if os.path.exists(testdir):
            shutil.rmtree(testdir)
        fileutil.make_dirs(testdir)
        self.sparent = service.MultiService()
        self.sparent.startService()
        self.proc = None
        self.tub = Tub()
        self.tub.setOption("expose-remote-exception-types", False)
        self.tub.setServiceParent(self.sparent)
        self.mode = mode
        self.failed = False
        self.keepalive_file = None

    def run(self):
        framelog = os.path.join(self.basedir, "driver.log")
        log.startLogging(open(framelog, "a"), setStdout=False)
        log.msg("CHECK_MEMORY(mode=%s) STARTING" % self.mode)
        #logfile = open(os.path.join(self.testdir, "log"), "w")
        #flo = log.FileLogObserver(logfile)
        #log.startLoggingWithObserver(flo.emit, setStdout=False)
        d = fireEventually()
        d.addCallback(lambda res: self.setUp())
        d.addCallback(lambda res: self.record_initial_memusage())
        d.addCallback(lambda res: self.make_nodes())
        d.addCallback(lambda res: self.wait_for_client_connected())
        d.addCallback(lambda res: self.do_test())
        d.addBoth(self.tearDown)
        def _err(err):
            self.failed = err
            log.err(err)
            print(err)
        d.addErrback(_err)
        def _done(res):
            reactor.stop()
            return res
        d.addBoth(_done)
        reactor.run()
        if self.failed:
            # raiseException doesn't work for CopiedFailures
            self.failed.raiseException()

    def setUp(self):
        #print("STARTING")
        self.stats = {}
        self.statsfile = open(os.path.join(self.basedir, "stats.out"), "a")
        self.make_introducer()
        d = self.start_client()
        def _record_control_furl(control_furl):
            self.control_furl = control_furl
            #print("OBTAINING '%s'" % (control_furl,))
            return self.tub.getReference(self.control_furl)
        d.addCallback(_record_control_furl)
        def _record_control(control_rref):
            self.control_rref = control_rref
        d.addCallback(_record_control)
        def _ready(res):
            #print("CLIENT READY")
            pass
        d.addCallback(_ready)
        return d

    def record_initial_memusage(self):
        print()
        print("Client started (no connections yet)")
        d = self._print_usage()
        d.addCallback(self.stash_stats, "init")
        return d

    def wait_for_client_connected(self):
        print()
        print("Client connecting to other nodes..")
        return self.control_rref.callRemote("wait_for_client_connections",
                                            self.numnodes+1)

    def tearDown(self, passthrough):
        # the client node will shut down in a few seconds
        #os.remove(os.path.join(self.clientdir, client.Client.EXIT_TRIGGER_FILE))
        log.msg("shutting down SystemTest services")
        if self.keepalive_file and os.path.exists(self.keepalive_file):
            age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME]
            log.msg("keepalive file at shutdown was %ds old" % age)
        d = defer.succeed(None)
        if self.proc:
            d.addCallback(lambda res: self.kill_client())
        d.addCallback(lambda res: self.sparent.stopService())
        d.addCallback(lambda res: flushEventualQueue())
        def _close_statsfile(res):
            self.statsfile.close()
        d.addCallback(_close_statsfile)
        d.addCallback(lambda res: passthrough)
        return d

    def make_introducer(self):
        iv_basedir = os.path.join(self.testdir, "introducer")
        os.mkdir(iv_basedir)
        self.introducer = introducer.IntroducerNode(basedir=iv_basedir)
        self.introducer.setServiceParent(self)
        self.introducer_furl = self.introducer.introducer_url

    def make_nodes(self):
        root = FilePath(self.testdir)
        self.nodes = []
        for i in range(self.numnodes):
            nodedir = root.child("node%d" % (i,))
            private = nodedir.child("private")
            private.makedirs()
            write_introducer(nodedir, "default", self.introducer_url)
            config = (
                "[client]\n"
                "shares.happy = 1\n"
                "[storage]\n"
            )
            # the only tests for which we want the internal nodes to actually
            # retain shares are the ones where somebody's going to download
            # them.
            if self.mode in ("download", "download-GET", "download-GET-slow"):
                # retain shares
                pass
            else:
                # for these tests, we tell the storage servers to pretend to
                # accept shares, but really just throw them out, since we're
                # only testing upload and not download.
                config += "debug_discard = true\n"
            if self.mode in ("receive",):
                # for this mode, the client-under-test gets all the shares,
                # so our internal nodes can refuse requests
                config += "readonly = true\n"
            nodedir.child("tahoe.cfg").setContent(config)
            c = client.Client(basedir=nodedir.path)
            c.setServiceParent(self)
            self.nodes.append(c)
        # the peers will start running, eventually they will connect to each
        # other and the introducer

    def touch_keepalive(self):
        if os.path.exists(self.keepalive_file):
            age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME]
            log.msg("touching keepalive file, was %ds old" % age)
        f = open(self.keepalive_file, "w")
        f.write("""\
If the node notices this file at startup, it will poll every 5 seconds and
terminate if the file is more than 10 seconds old, or if it has been deleted.
If the test harness has an internal failure and neglects to kill off the node
itself, this helps to avoid leaving processes lying around. The contents of
this file are ignored.
        """)
        f.close()

    def start_client(self):
        # this returns a Deferred that fires with the client's control.furl
        log.msg("MAKING CLIENT")
        # self.testdir is an absolute Unicode path
        clientdir = self.clientdir = os.path.join(self.testdir, u"client")
        clientdir_str = clientdir.encode(get_filesystem_encoding())
        quiet = StringIO()
        create_node.create_node({'basedir': clientdir}, out=quiet)
        log.msg("DONE MAKING CLIENT")
        write_introducer(clientdir, "default", self.introducer_furl)
        # now replace tahoe.cfg
        # set webport=0 and then ask the node what port it picked.
        f = open(os.path.join(clientdir, "tahoe.cfg"), "w")
        f.write("[node]\n"
                "web.port = tcp:0:interface=127.0.0.1\n"
                "[client]\n"
                "shares.happy = 1\n"
                "[storage]\n"
                )

        if self.mode in ("upload-self", "receive"):
            # accept and store shares, to trigger the memory consumption bugs
            pass
        else:
            # don't accept any shares
            f.write("readonly = true\n")
            ## also, if we do receive any shares, throw them away
            #f.write("debug_discard = true")
        if self.mode == "upload-self":
            pass
        f.close()
        self.keepalive_file = os.path.join(clientdir,
                                           client.Client.EXIT_TRIGGER_FILE)
        # now start updating the mtime.
        self.touch_keepalive()
        ts = internet.TimerService(1.0, self.touch_keepalive)
        ts.setServiceParent(self.sparent)

        pp = ClientWatcher()
        self.proc_done = pp.d = defer.Deferred()
        logfile = os.path.join(self.basedir, "client.log")
        tahoes = procutils.which("tahoe")
        if not tahoes:
            raise RuntimeError("unable to find a 'tahoe' executable")
        cmd = [tahoes[0], "run", ".", "-l", logfile]
        env = os.environ.copy()
        self.proc = reactor.spawnProcess(pp, cmd[0], cmd, env, path=clientdir_str)
        log.msg("CLIENT STARTED")

        # now we wait for the client to get started. we're looking for the
        # control.furl file to appear.
        furl_file = os.path.join(clientdir, "private", "control.furl")
        url_file = os.path.join(clientdir, "node.url")
        def _check():
            if pp.ended and pp.ended.value.status != 0:
                # the twistd process ends normally (with rc=0) if the child
                # is successfully launched. It ends abnormally (with rc!=0)
                # if the child cannot be launched.
                raise ChildDidNotStartError("process ended while waiting for startup")
            return os.path.exists(furl_file)
        d = self.poll(_check, 0.1)
        # once it exists, wait a moment before we read from it, just in case
        # it hasn't finished writing the whole thing. Ideally control.furl
        # would be created in some atomic fashion, or made non-readable until
        # it's ready, but I can't think of an easy way to do that, and I
        # think the chances that we'll observe a half-write are pretty low.
        def _stall(res):
            d2 = defer.Deferred()
            reactor.callLater(0.1, d2.callback, None)
            return d2
        d.addCallback(_stall)
        def _read(res):
            # read the node's URL
            self.webish_url = open(url_file, "r").read().strip()
            if self.webish_url[-1] == "/":
                # trim trailing slash, since the rest of the code wants it gone
                self.webish_url = self.webish_url[:-1]
            f = open(furl_file, "r")
            furl = f.read()
            return furl.strip()
        d.addCallback(_read)
        return d


    def kill_client(self):
        # returns a Deferred that fires when the process exits. This may only
        # be called once.
        try:
            self.proc.signalProcess("INT")
        except error.ProcessExitedAlready:
            pass
        return self.proc_done


    def create_data(self, name, size):
        filename = os.path.join(self.testdir, name + ".data")
        f = open(filename, "wb")
        block = "a" * 8192
        while size > 0:
            l = min(size, 8192)
            f.write(block[:l])
            size -= l
        return filename

    def stash_stats(self, stats, name):
        self.statsfile.write("%s %s: %d\n" % (self.mode, name, stats['VmPeak']))
        self.statsfile.flush()
        self.stats[name] = stats['VmPeak']

    def POST(self, urlpath, **fields):
        url = self.webish_url + urlpath
        sepbase = "boogabooga"
        sep = "--" + sepbase
        form = []
        form.append(sep)
        form.append('Content-Disposition: form-data; name="_charset"')
        form.append('')
        form.append('UTF-8')
        form.append(sep)
        for name, value in fields.iteritems():
            if isinstance(value, tuple):
                filename, value = value
                form.append('Content-Disposition: form-data; name="%s"; '
                            'filename="%s"' % (name, filename))
            else:
                form.append('Content-Disposition: form-data; name="%s"' % name)
            form.append('')
            form.append(value)
            form.append(sep)
        form[-1] += "--"
        body = "\r\n".join(form) + "\r\n"
        headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase,
                   }
        return tw_client.getPage(url, method="POST", postdata=body,
                                 headers=headers, followRedirect=False)

    def GET_discard(self, urlpath, stall):
        url = self.webish_url + urlpath + "?filename=dummy-get.out"
        return discardPage(url, stall)

    def _print_usage(self, res=None):
        d = self.control_rref.callRemote("get_memory_usage")
        def _print(stats):
            print("VmSize: %9d VmPeak: %9d" % (stats["VmSize"],
                                               stats["VmPeak"]))
            return stats
        d.addCallback(_print)
        return d

    def _do_upload(self, res, size, files, uris):
        name = '%d' % size
        print()
        print("uploading %s" % name)
        if self.mode in ("upload", "upload-self"):
            d = self.control_rref.callRemote("upload_random_data_from_file",
                                             size,
                                             convergence="check-memory")
        elif self.mode == "upload-POST":
            data = "a" * size
            url = "/uri"
            d = self.POST(url, t="upload", file=("%d.data" % size, data))
        elif self.mode in ("receive",
                           "download", "download-GET", "download-GET-slow"):
            # mode=receive: upload the data from a local peer, so that the
            # client-under-test receives and stores the shares
            #
            # mode=download*: upload the data from a local peer, then have
            # the client-under-test download it.
            #
            # we need to wait until the uploading node has connected to all
            # peers, since the wait_for_client_connections() above doesn't
            # pay attention to our self.nodes[] and their connections.
            files[name] = self.create_data(name, size)
            u = self.nodes[0].getServiceNamed("uploader")
            d = self.nodes[0].debug_wait_for_client_connections(self.numnodes+1)
            d.addCallback(lambda res:
                          u.upload(upload.FileName(files[name],
                                                   convergence="check-memory")))
            d.addCallback(lambda results: results.get_uri())
        else:
            raise ValueError("unknown mode=%s" % self.mode)
        def _complete(uri):
            uris[name] = uri
            print("uploaded %s" % name)
        d.addCallback(_complete)
        return d

    def _do_download(self, res, size, uris):
        if self.mode not in ("download", "download-GET", "download-GET-slow"):
            return
        name = '%d' % size
        print("downloading %s" % name)
        uri = uris[name]

        if self.mode == "download":
            d = self.control_rref.callRemote("download_to_tempfile_and_delete",
                                             uri)
        elif self.mode == "download-GET":
            url = "/uri/%s" % uri
            d = self.GET_discard(urllib.quote(url), stall=False)
        elif self.mode == "download-GET-slow":
            url = "/uri/%s" % uri
            d = self.GET_discard(urllib.quote(url), stall=True)

        def _complete(res):
            print("downloaded %s" % name)
            return res
        d.addCallback(_complete)
        return d

    def do_test(self):
        #print("CLIENT STARTED")
        #print("FURL", self.control_furl)
        #print("RREF", self.control_rref)
        #print()
        kB = 1000; MB = 1000*1000
        files = {}
        uris = {}

        d = self._print_usage()
        d.addCallback(self.stash_stats, "0B")

        for i in range(10):
            d.addCallback(self._do_upload, 10*kB+i, files, uris)
            d.addCallback(self._do_download, 10*kB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "10kB")

        for i in range(3):
            d.addCallback(self._do_upload, 10*MB+i, files, uris)
            d.addCallback(self._do_download, 10*MB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "10MB")

        for i in range(1):
            d.addCallback(self._do_upload, 50*MB+i, files, uris)
            d.addCallback(self._do_download, 50*MB+i, uris)
            d.addCallback(self._print_usage)
        d.addCallback(self.stash_stats, "50MB")

        #for i in range(1):
        #    d.addCallback(self._do_upload, 100*MB+i, files, uris)
        #    d.addCallback(self._do_download, 100*MB+i, uris)
        #    d.addCallback(self._print_usage)
        #d.addCallback(self.stash_stats, "100MB")

        #d.addCallback(self.stall)
        def _done(res):
            print("FINISHING")
        d.addCallback(_done)
        return d

    def stall(self, res):
        d = defer.Deferred()
        reactor.callLater(5, d.callback, None)
        return d


class ClientWatcher(protocol.ProcessProtocol, object):
    ended = False
    def outReceived(self, data):
        print("OUT:", data)
    def errReceived(self, data):
        print("ERR:", data)
    def processEnded(self, reason):
        self.ended = reason
        self.d.callback(None)


if __name__ == '__main__':
    mode = "upload"
    if len(sys.argv) > 1:
        mode = sys.argv[1]
    if sys.maxsize == 2147483647:
        bits = "32"
    elif sys.maxsize == 9223372036854775807:
        bits = "64"
    else:
        bits = "?"
    print("%s-bit system (sys.maxsize=%d)" % (bits, sys.maxsize))
    # put the logfile and stats.out in _test_memory/ . These stick around.
    # put the nodes and other files in _test_memory/test/ . These are
    # removed each time we run.
    sf = SystemFramework("_test_memory", mode)
    sf.run()
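For reference, a simplified standalone sketch of the multipart/form-data body that SystemFramework.POST() above builds by hand; it omits the "_charset" part and file uploads, and the field names used in the example call are illustrative only:

def encode_multipart(fields, boundary="boogabooga"):
    # Build a minimal multipart/form-data body the same way POST() does:
    # each field becomes a Content-Disposition part separated by the boundary.
    sep = "--" + boundary
    parts = [sep]
    for name, value in fields.items():
        parts.append('Content-Disposition: form-data; name="%s"' % name)
        parts.append('')
        parts.append(value)
        parts.append(sep)
    parts[-1] += "--"
    body = "\r\n".join(parts) + "\r\n"
    headers = {"content-type": "multipart/form-data; boundary=" + boundary}
    return body, headers

body, headers = encode_multipart({"t": "upload"})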
@ -1,234 +0,0 @@
from __future__ import print_function

import os, sys
from twisted.internet import reactor, defer
from twisted.python import log
from twisted.application import service
from foolscap.api import Tub, fireEventually

MB = 1000000

class SpeedTest(object):
    DO_IMMUTABLE = True
    DO_MUTABLE_CREATE = True
    DO_MUTABLE = True

    def __init__(self, test_client_dir):
        #self.real_stderr = sys.stderr
        log.startLogging(open("st.log", "a"), setStdout=False)
        f = open(os.path.join(test_client_dir, "private", "control.furl"), "r")
        self.control_furl = f.read().strip()
        f.close()
        self.base_service = service.MultiService()
        self.failed = None
        self.upload_times = {}
        self.download_times = {}

    def run(self):
        print("STARTING")
        d = fireEventually()
        d.addCallback(lambda res: self.setUp())
        d.addCallback(lambda res: self.do_test())
        d.addBoth(self.tearDown)
        def _err(err):
            self.failed = err
            log.err(err)
            print(err)
        d.addErrback(_err)
        def _done(res):
            reactor.stop()
            return res
        d.addBoth(_done)
        reactor.run()
        if self.failed:
            print("EXCEPTION")
            print(self.failed)
            sys.exit(1)

    def setUp(self):
        self.base_service.startService()
        self.tub = Tub()
        self.tub.setOption("expose-remote-exception-types", False)
        self.tub.setServiceParent(self.base_service)
        d = self.tub.getReference(self.control_furl)
        def _gotref(rref):
            self.client_rref = rref
            print("Got Client Control reference")
            return self.stall(5)
        d.addCallback(_gotref)
        return d

    def stall(self, delay, result=None):
        d = defer.Deferred()
        reactor.callLater(delay, d.callback, result)
        return d

    def record_times(self, times, key):
        print("TIME (%s): %s up, %s down" % (key, times[0], times[1]))
        self.upload_times[key], self.download_times[key] = times

    def one_test(self, res, name, count, size, mutable):
        # values for 'mutable':
        #   False (upload a different CHK file for each 'count')
        #   "create" (upload different contents into a new SSK file)
        #   "upload" (upload different contents into the same SSK file. The
        #             time consumed does not include the creation of the file)
        d = self.client_rref.callRemote("speed_test", count, size, mutable)
        d.addCallback(self.record_times, name)
        return d

    def measure_rtt(self, res):
        # use RIClient.get_nodeid() to measure the foolscap-level RTT
        d = self.client_rref.callRemote("measure_peer_response_time")
        def _got(res):
            assert len(res) # need at least one peer
            times = res.values()
            self.total_rtt = sum(times)
            self.average_rtt = sum(times) / len(times)
            self.max_rtt = max(times)
            print("num-peers: %d" % len(times))
            print("total-RTT: %f" % self.total_rtt)
            print("average-RTT: %f" % self.average_rtt)
            print("max-RTT: %f" % self.max_rtt)
        d.addCallback(_got)
        return d

    def do_test(self):
        print("doing test")
        d = defer.succeed(None)
        d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one
        d.addCallback(self.measure_rtt)

        if self.DO_IMMUTABLE:
            # immutable files
            d.addCallback(self.one_test, "1x 200B", 1, 200, False)
            d.addCallback(self.one_test, "10x 200B", 10, 200, False)
            def _maybe_do_100x_200B(res):
                if self.upload_times["10x 200B"] < 5:
                    print("10x 200B test went too fast, doing 100x 200B test")
                    return self.one_test(None, "100x 200B", 100, 200, False)
                return
            d.addCallback(_maybe_do_100x_200B)
            d.addCallback(self.one_test, "1MB", 1, 1*MB, False)
            d.addCallback(self.one_test, "10MB", 1, 10*MB, False)
            def _maybe_do_100MB(res):
                if self.upload_times["10MB"] > 30:
                    print("10MB test took too long, skipping 100MB test")
                    return
                return self.one_test(None, "100MB", 1, 100*MB, False)
            d.addCallback(_maybe_do_100MB)

        if self.DO_MUTABLE_CREATE:
            # mutable file creation
            d.addCallback(self.one_test, "10x 200B SSK creation", 10, 200,
                          "create")

        if self.DO_MUTABLE:
            # mutable file upload/download
            d.addCallback(self.one_test, "10x 200B SSK", 10, 200, "upload")
            def _maybe_do_100x_200B_SSK(res):
                if self.upload_times["10x 200B SSK"] < 5:
                    print("10x 200B SSK test went too fast, doing 100x 200B SSK")
                    return self.one_test(None, "100x 200B SSK", 100, 200,
                                         "upload")
                return
            d.addCallback(_maybe_do_100x_200B_SSK)
            d.addCallback(self.one_test, "1MB SSK", 1, 1*MB, "upload")

        d.addCallback(self.calculate_speeds)
        return d

    def calculate_speeds(self, res):
        # time = A*size+B
        # we assume that A*200bytes is negligible

        if self.DO_IMMUTABLE:
            # upload
            if "100x 200B" in self.upload_times:
                B = self.upload_times["100x 200B"] / 100
            else:
                B = self.upload_times["10x 200B"] / 10
            print("upload per-file time: %.3fs" % B)
            print("upload per-file times-avg-RTT: %f" % (B / self.average_rtt))
            print("upload per-file times-total-RTT: %f" % (B / self.total_rtt))
            A1 = 1*MB / (self.upload_times["1MB"] - B) # in bytes per second
            print("upload speed (1MB):", self.number(A1, "Bps"))
            A2 = 10*MB / (self.upload_times["10MB"] - B)
            print("upload speed (10MB):", self.number(A2, "Bps"))
            if "100MB" in self.upload_times:
                A3 = 100*MB / (self.upload_times["100MB"] - B)
                print("upload speed (100MB):", self.number(A3, "Bps"))

            # download
            if "100x 200B" in self.download_times:
                B = self.download_times["100x 200B"] / 100
            else:
                B = self.download_times["10x 200B"] / 10
            print("download per-file time: %.3fs" % B)
            print("download per-file times-avg-RTT: %f" % (B / self.average_rtt))
            print("download per-file times-total-RTT: %f" % (B / self.total_rtt))
            A1 = 1*MB / (self.download_times["1MB"] - B) # in bytes per second
            print("download speed (1MB):", self.number(A1, "Bps"))
            A2 = 10*MB / (self.download_times["10MB"] - B)
            print("download speed (10MB):", self.number(A2, "Bps"))
            if "100MB" in self.download_times:
                A3 = 100*MB / (self.download_times["100MB"] - B)
                print("download speed (100MB):", self.number(A3, "Bps"))

        if self.DO_MUTABLE_CREATE:
            # SSK creation
            B = self.upload_times["10x 200B SSK creation"] / 10
            print("create per-file time SSK: %.3fs" % B)

        if self.DO_MUTABLE:
            # upload SSK
            if "100x 200B SSK" in self.upload_times:
                B = self.upload_times["100x 200B SSK"] / 100
            else:
                B = self.upload_times["10x 200B SSK"] / 10
            print("upload per-file time SSK: %.3fs" % B)
            A1 = 1*MB / (self.upload_times["1MB SSK"] - B) # in bytes per second
            print("upload speed SSK (1MB):", self.number(A1, "Bps"))

            # download SSK
            if "100x 200B SSK" in self.download_times:
                B = self.download_times["100x 200B SSK"] / 100
            else:
                B = self.download_times["10x 200B SSK"] / 10
            print("download per-file time SSK: %.3fs" % B)
            A1 = 1*MB / (self.download_times["1MB SSK"] - B) # in bytes per
                                                             # second
            print("download speed SSK (1MB):", self.number(A1, "Bps"))

    def number(self, value, suffix=""):
        scaling = 1
        if value < 1:
            fmt = "%1.2g%s"
        elif value < 100:
            fmt = "%.1f%s"
        elif value < 1000:
            fmt = "%d%s"
        elif value < 1e6:
            fmt = "%.2fk%s"; scaling = 1e3
        elif value < 1e9:
            fmt = "%.2fM%s"; scaling = 1e6
        elif value < 1e12:
            fmt = "%.2fG%s"; scaling = 1e9
        elif value < 1e15:
            fmt = "%.2fT%s"; scaling = 1e12
        elif value < 1e18:
            fmt = "%.2fP%s"; scaling = 1e15
        else:
            fmt = "huge! %g%s"
        return fmt % (value / scaling, suffix)

    def tearDown(self, res):
        d = self.base_service.stopService()
        d.addCallback(lambda ignored: res)
        return d


if __name__ == '__main__':
    test_client_dir = sys.argv[1]
    st = SpeedTest(test_client_dir)
    st.run()
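To make the time = A*size + B model used by calculate_speeds() above concrete, here is a small standalone sketch with made-up timings; all numbers are illustrative, not measurements:

MB = 1000000

# Hypothetical measured upload times, in seconds.
upload_times = {"10x 200B": 2.0, "1MB": 1.3, "10MB": 9.5}

# Per-file overhead B, estimated from many tiny uploads (A*200 bytes ~ 0).
B = upload_times["10x 200B"] / 10                  # 0.2 s per file
A1 = 1 * MB / (upload_times["1MB"] - B)            # ~909,000 bytes/s
A2 = 10 * MB / (upload_times["10MB"] - B)          # ~1,075,000 bytes/s
print("upload per-file time: %.3fs" % B)
print("upload speed (1MB): %.0f Bps" % A1)
print("upload speed (10MB): %.0f Bps" % A2)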
@ -1,20 +0,0 @@
#! /usr/bin/env python

from __future__ import print_function

from foolscap import Tub
from foolscap.eventual import eventually
import sys
from twisted.internet import reactor

def go():
    t = Tub()
    d = t.getReference(sys.argv[1])
    d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
    def _got(res):
        print(res)
        reactor.stop()
    d.addCallback(_got)

eventually(go)
reactor.run()
@ -1,53 +0,0 @@
# Python 3 porting targets
#
# NOTE: this Makefile requires GNU make

### Defensive settings for make:
#     https://tech.davis-hansson.com/p/make/
SHELL := bash
.ONESHELL:
.SHELLFLAGS := -xeu -o pipefail -c
.SILENT:
.DELETE_ON_ERROR:
MAKEFLAGS += --warn-undefined-variables
MAKEFLAGS += --no-builtin-rules


# Top-level, phony targets

.PHONY: default
default:
	@echo "no default target"

.PHONY: test-py3-all-before
## Log the output of running all tests under Python 3 before changes
test-py3-all-before: ../../.tox/make-test-py3-all-old.log
.PHONY: test-py3-all-diff
## Compare the output of running all tests under Python 3 after changes
test-py3-all-diff: ../../.tox/make-test-py3-all.diff


# Real targets

# Gauge the impact of changes on Python 3 compatibility
# Compare the output from running all tests under Python 3 before and after changes.
# Before changes:
#   `$ rm -f .tox/make-test-py3-all-*.log && make .tox/make-test-py3-all-old.log`
# After changes:
#   `$ make .tox/make-test-py3-all.diff`
$(foreach side,old new,../../.tox/make-test-py3-all-$(side).log):
	cd "../../"
	tox --develop --notest -e py36-coverage
	(make VIRTUAL_ENV=./.tox/py36-coverage TEST_SUITE=allmydata \
	    test-venv-coverage || true) | \
	sed -E 's/\([0-9]+\.[0-9]{3} secs\)/(#.### secs)/' | \
	tee "./misc/python3/$(@)"
../../.tox/make-test-py3-all.diff: ../../.tox/make-test-py3-all-new.log
	(diff -u "$(<:%-new.log=%-old.log)" "$(<)" || true) | tee "$(@)"

# Locate modules that are candidates for naively converting `unicode` -> `str`.
# List all Python source files that reference `unicode` but don't reference `str`
../../.tox/py3-unicode-no-str.ls:
	cd "../../"
	find src -type f -iname '*.py' -exec grep -l -E '\Wunicode\W' '{}' ';' | \
	    xargs grep -L '\Wstr\W' | xargs ls -ld | tee "./misc/python3/$(@)"
@ -1 +0,0 @@
Tahoe-LAFS now requires Twisted 19.10.0 or newer. As a result, it now has a transitive dependency on bcrypt.

@ -1 +0,0 @@
The "Great Black Swamp" proposed specification has been expanded to include two lease management APIs.

@ -1 +0,0 @@
Debian 8 support has been replaced with Debian 10 support.

@ -1 +0,0 @@
Added 'typechecks' environment for tox running mypy and performing static typechecks.

@ -1 +0,0 @@
Tahoe-LAFS no longer depends on Nevow.

@ -1 +0,0 @@

@ -1 +0,0 @@
Tahoe-LAFS now requires the `netifaces` Python package and no longer requires the external `ip`, `ifconfig`, or `route.exe` executables.

@ -1 +0,0 @@
The Tahoe-LAFS project no longer commits to maintaining binary packages for all dependencies at <https://tahoe-lafs.org/deps>. Please use PyPI instead.

@ -1 +0,0 @@
The specification section of the Tahoe-LAFS documentation now includes explicit discussion of the security properties of Foolscap "fURLs" on which it depends.

@ -1 +0,0 @@
The ``[client]introducer.furl`` configuration item is now deprecated in favor of the ``private/introducers.yaml`` file.
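For context on the deprecation note above, a hedged sketch of what the replacement private/introducers.yaml file might contain; the pet name and fURL below are placeholders, not real values:

introducers:
  example-introducer:            # placeholder pet name
    furl: "pb://TUBID@tcp:introducer.example.net:44801/SWISSNUM"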
@ -1 +0,0 @@
Fix regression that broke flogtool results on Python 2.

@ -1 +0,0 @@
Fix a logging regression on Python 2 involving unicode strings.

@ -1 +0,0 @@
Announcements delivered through the introducer system are no longer automatically annotated with copious information about the Tahoe-LAFS software version nor the versions of its dependencies.
Some files were not shown because too many files have changed in this diff.