diff --git a/.circleci/Dockerfile.debian b/.circleci/Dockerfile.debian index 96c54736c..abab1f4fa 100644 --- a/.circleci/Dockerfile.debian +++ b/.circleci/Dockerfile.debian @@ -1,7 +1,7 @@ ARG TAG FROM debian:${TAG} ARG PYTHON_VERSION - +ENV DEBIAN_FRONTEND noninteractive ENV WHEELHOUSE_PATH /tmp/wheelhouse ENV VIRTUALENV_PATH /tmp/venv # This will get updated by the CircleCI checkout step. @@ -18,15 +18,11 @@ RUN apt-get --quiet update && \ libffi-dev \ libssl-dev \ libyaml-dev \ - virtualenv + virtualenv \ + tor # Get the project source. This is better than it seems. CircleCI will # *update* this checkout on each job run, saving us more time per-job. COPY . ${BUILD_SRC_ROOT} RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}" - -# Only the integration tests currently need this but it doesn't hurt to always -# have it present and it's simpler than building a whole extra image just for -# the integration tests. -RUN ${BUILD_SRC_ROOT}/integration/install-tor.sh diff --git a/.circleci/Dockerfile.centos b/.circleci/Dockerfile.oraclelinux similarity index 93% rename from .circleci/Dockerfile.centos rename to .circleci/Dockerfile.oraclelinux index 9070d71d9..cf4c009d2 100644 --- a/.circleci/Dockerfile.centos +++ b/.circleci/Dockerfile.oraclelinux @@ -1,5 +1,5 @@ ARG TAG -FROM centos:${TAG} +FROM oraclelinux:${TAG} ARG PYTHON_VERSION ENV WHEELHOUSE_PATH /tmp/wheelhouse @@ -13,7 +13,6 @@ RUN yum install --assumeyes \ sudo \ make automake gcc gcc-c++ \ python${PYTHON_VERSION} \ - python${PYTHON_VERSION}-devel \ libffi-devel \ openssl-devel \ libyaml \ diff --git a/.circleci/Dockerfile.ubuntu b/.circleci/Dockerfile.ubuntu index 2fcc60f5a..22689f0c1 100644 --- a/.circleci/Dockerfile.ubuntu +++ b/.circleci/Dockerfile.ubuntu @@ -1,7 +1,7 @@ ARG TAG FROM ubuntu:${TAG} ARG PYTHON_VERSION - +ENV DEBIAN_FRONTEND noninteractive ENV WHEELHOUSE_PATH /tmp/wheelhouse ENV VIRTUALENV_PATH /tmp/venv # This will get updated by the CircleCI checkout step. diff --git a/.circleci/config.yml b/.circleci/config.yml index 2fc8e88e7..051e690b7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,53 +15,38 @@ workflows: ci: jobs: # Start with jobs testing various platforms. - - "debian-9": - {} - "debian-10": - requires: - - "debian-9" + {} + - "debian-11": + {} - "ubuntu-20-04": {} - "ubuntu-18-04": requires: - "ubuntu-20-04" - - "ubuntu-16-04": - requires: - - "ubuntu-20-04" - - "fedora-29": - {} - - "fedora-28": - requires: - - "fedora-29" - - - "centos-8": + # Equivalent to RHEL 8; CentOS 8 is dead. + - "oraclelinux-8": {} - - "nixos-19-09": - {} + - "nixos": + name: "NixOS 21.05" + nixpkgs: "21.05" - - "nixos-21-05": - {} + - "nixos": + name: "NixOS 21.11" + nixpkgs: "21.11" - # Test against PyPy 2.7 - - "pypy27-buster": - {} - - # Just one Python 3.6 configuration while the port is in-progress. - - "python36": - {} + # Eventually, test against PyPy 3.8 + #- "pypy27-buster": + # {} # Other assorted tasks and configurations - - "lint": - {} - - "codechecks3": + - "codechecks": {} - "pyinstaller": {} - - "deprecations": - {} - "c-locale": {} # Any locale other than C or UTF-8. @@ -72,7 +57,7 @@ workflows: requires: # If the unit test suite doesn't pass, don't bother running the # integration tests. 
- - "debian-9" + - "debian-11" - "typechecks": {} @@ -102,24 +87,19 @@ workflows: # https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts - "build-image-debian-10": &DOCKERHUB_CONTEXT context: "dockerhub-auth" - - "build-image-debian-9": - <<: *DOCKERHUB_CONTEXT - - "build-image-ubuntu-16-04": + - "build-image-debian-11": <<: *DOCKERHUB_CONTEXT - "build-image-ubuntu-18-04": <<: *DOCKERHUB_CONTEXT - "build-image-ubuntu-20-04": <<: *DOCKERHUB_CONTEXT - - "build-image-fedora-28": + - "build-image-fedora-35": <<: *DOCKERHUB_CONTEXT - - "build-image-fedora-29": - <<: *DOCKERHUB_CONTEXT - - "build-image-centos-8": - <<: *DOCKERHUB_CONTEXT - - "build-image-pypy27-buster": - <<: *DOCKERHUB_CONTEXT - - "build-image-python36-ubuntu": + - "build-image-oraclelinux-8": <<: *DOCKERHUB_CONTEXT + # Restore later as PyPy38 + #- "build-image-pypy27-buster": + # <<: *DOCKERHUB_CONTEXT jobs: @@ -145,10 +125,10 @@ jobs: # Since this job is never scheduled this step is never run so the # actual value here is irrelevant. - lint: + codechecks: docker: - <<: *DOCKERHUB_AUTH - image: "circleci/python:2" + image: "cimg/python:3.9" steps: - "checkout" @@ -163,28 +143,10 @@ jobs: command: | ~/.local/bin/tox -e codechecks - codechecks3: - docker: - - <<: *DOCKERHUB_AUTH - image: "circleci/python:3" - - steps: - - "checkout" - - - run: - name: "Install tox" - command: | - pip install --user tox - - - run: - name: "Static-ish code checks" - command: | - ~/.local/bin/tox -e codechecks3 - pyinstaller: docker: - <<: *DOCKERHUB_AUTH - image: "circleci/python:2" + image: "cimg/python:3.9" steps: - "checkout" @@ -207,10 +169,10 @@ jobs: command: | dist/Tahoe-LAFS/tahoe --version - debian-9: &DEBIAN + debian-10: &DEBIAN docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/debian:9-py2.7" + image: "tahoelafsci/debian:10-py3.7" user: "nobody" environment: &UTF_8_ENVIRONMENT @@ -224,7 +186,7 @@ jobs: # filenames and argv). LANG: "en_US.UTF-8" # Select a tox environment to run for this job. - TAHOE_LAFS_TOX_ENVIRONMENT: "py27" + TAHOE_LAFS_TOX_ENVIRONMENT: "py37" # Additional arguments to pass to tox. TAHOE_LAFS_TOX_ARGS: "" # The path in which test artifacts will be placed. @@ -292,29 +254,29 @@ jobs: /tmp/venv/bin/codecov fi - - debian-10: + debian-11: <<: *DEBIAN docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/debian:10-py2.7" + image: "tahoelafsci/debian:11-py3.9" user: "nobody" - - - pypy27-buster: - <<: *DEBIAN - docker: - - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/pypy:buster-py2" - user: "nobody" - environment: <<: *UTF_8_ENVIRONMENT - # We don't do coverage since it makes PyPy far too slow: - TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27" - # Since we didn't collect it, don't upload it. - UPLOAD_COVERAGE: "" + TAHOE_LAFS_TOX_ENVIRONMENT: "py39" + # Restore later using PyPy3.8 + # pypy27-buster: + # <<: *DEBIAN + # docker: + # - <<: *DOCKERHUB_AUTH + # image: "tahoelafsci/pypy:buster-py2" + # user: "nobody" + # environment: + # <<: *UTF_8_ENVIRONMENT + # # We don't do coverage since it makes PyPy far too slow: + # TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27" + # # Since we didn't collect it, don't upload it. + # UPLOAD_COVERAGE: "" c-locale: <<: *DEBIAN @@ -332,22 +294,12 @@ jobs: # aka "Latin 1" LANG: "en_US.ISO-8859-1" - - deprecations: - <<: *DEBIAN - - environment: - <<: *UTF_8_ENVIRONMENT - # Select the deprecations tox environments. - TAHOE_LAFS_TOX_ENVIRONMENT: "deprecations,upcoming-deprecations" - # Put the logs somewhere we can report them. 
- TAHOE_LAFS_WARNINGS_LOG: "/tmp/artifacts/deprecation-warnings.log" - # The deprecations tox environments don't do coverage measurement. - UPLOAD_COVERAGE: "" - - integration: <<: *DEBIAN + docker: + - <<: *DOCKERHUB_AUTH + image: "tahoelafsci/debian:11-py3.9" + user: "nobody" environment: <<: *UTF_8_ENVIRONMENT @@ -362,28 +314,11 @@ jobs: - run: *SETUP_VIRTUALENV - run: *RUN_TESTS - - ubuntu-16-04: - <<: *DEBIAN - docker: - - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:16.04-py2.7" - user: "nobody" - - ubuntu-18-04: &UBUNTU_18_04 <<: *DEBIAN docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:18.04-py2.7" - user: "nobody" - - - python36: - <<: *UBUNTU_18_04 - docker: - - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:18.04-py3" + image: "tahoelafsci/ubuntu:18.04-py3.7" user: "nobody" environment: @@ -392,24 +327,28 @@ jobs: # this reporter on Python 3. So drop that and just specify the # reporter. TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file" - TAHOE_LAFS_TOX_ENVIRONMENT: "py36" + TAHOE_LAFS_TOX_ENVIRONMENT: "py37" ubuntu-20-04: <<: *DEBIAN docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:20.04" + image: "tahoelafsci/ubuntu:20.04-py3.9" user: "nobody" + environment: + <<: *UTF_8_ENVIRONMENT + TAHOE_LAFS_TOX_ENVIRONMENT: "py39" - - centos-8: &RHEL_DERIV + oraclelinux-8: &RHEL_DERIV docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/centos:8-py2" + image: "tahoelafsci/oraclelinux:8-py3.8" user: "nobody" - environment: *UTF_8_ENVIRONMENT + environment: + <<: *UTF_8_ENVIRONMENT + TAHOE_LAFS_TOX_ENVIRONMENT: "py38" # pip cannot install packages if the working directory is not readable. # We want to run a lot of steps as nobody instead of as root. @@ -425,36 +364,65 @@ jobs: - store_artifacts: *STORE_OTHER_ARTIFACTS - run: *SUBMIT_COVERAGE - - fedora-28: + fedora-35: <<: *RHEL_DERIV docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/fedora:28-py" + image: "tahoelafsci/fedora:35-py3" user: "nobody" + nixos: + parameters: + nixpkgs: + description: >- + Reference the name of a niv-managed nixpkgs source (see `niv show` + and nix/sources.json) + type: "string" - fedora-29: - <<: *RHEL_DERIV - docker: - - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/fedora:29-py" - user: "nobody" - - nixos-19-09: &NIXOS docker: # Run in a highly Nix-capable environment. - <<: *DOCKERHUB_AUTH - image: "nixorg/nix:circleci" + image: "nixos/nix:2.3.16" environment: - NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz" - SOURCE: "nix/" + # CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and + # allows us to push to CACHIX_NAME. We only need this set for + # `cachix use` in this step. + CACHIX_NAME: "tahoe-lafs-opensource" steps: - - "checkout" - "run": - name: "Build and Test" + # The nixos/nix image does not include ssh. Install it so the + # `checkout` step will succeed. We also want cachix for + # Nix-friendly caching. + name: "Install Basic Dependencies" + command: | + nix-env \ + --file https://github.com/nixos/nixpkgs/archive/nixos-<>.tar.gz \ + --install \ + -A openssh cachix bash + + - "checkout" + + - run: + name: "Cachix setup" + # Record the store paths that exist before we did much. There's no + # reason to cache these, they're either in the image or have to be + # retrieved before we can use cachix to restore from cache. + command: | + cachix use "${CACHIX_NAME}" + nix path-info --all > /tmp/store-path-pre-build + + - "run": + # The Nix package doesn't know how to do this part, unfortunately. 
+ name: "Generate version" + command: | + nix-shell \ + -p 'python3.withPackages (ps: [ ps.setuptools ])' \ + --run 'python setup.py update_version' + + - "run": + name: "Build" command: | # CircleCI build environment looks like it has a zillion and a # half cores. Don't let Nix autodetect this high core count @@ -466,22 +434,55 @@ jobs: # build a couple simple little dependencies that don't take # advantage of multiple cores and we get a little speedup by doing # them in parallel. - nix-build --cores 3 --max-jobs 2 "$SOURCE" + nix-build --cores 3 --max-jobs 2 --argstr pkgsVersion "nixpkgs-<>" - nixos-21-05: - <<: *NIXOS + - "run": + name: "Test" + command: | + # Let it go somewhat wild for the test suite itself + nix-build --cores 8 --argstr pkgsVersion "nixpkgs-<>" tests.nix - environment: - # Note this doesn't look more similar to the 19.09 NIX_PATH URL because - # there was some internal shuffling by the NixOS project about how they - # publish stable revisions. - NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs/archive/d32b07e6df276d78e3640eb43882b80c9b2b3459.tar.gz" - SOURCE: "nix/py3.nix" + - run: + # Send any new store objects to cachix. + name: "Push to Cachix" + when: "always" + command: | + # Cribbed from + # https://circleci.com/blog/managing-secrets-when-you-have-pull-requests-from-outside-contributors/ + if [ -n "$CIRCLE_PR_NUMBER" ]; then + # I'm sure you're thinking "CIRCLE_PR_NUMBER must just be the + # number of the PR being built". Sorry, dear reader, you have + # guessed poorly. It is also conditionally set based on whether + # this is a PR from a fork or not. + # + # https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables + echo "Skipping Cachix push for forked PR." + else + # If this *isn't* a build from a fork then we have the Cachix + # write key in our environment and we can push any new objects + # to Cachix. + # + # To decide what to push, we inspect the list of store objects + # that existed before and after we did most of our work. Any + # that are new after the work is probably a useful thing to have + # around so push it to the cache. We exclude all derivation + # objects (.drv files) because they're cheap to reconstruct and + # by the time you know their cache key you've already done all + # the work anyway. + # + # This shell expression for finding the objects and pushing them + # was from the Cachix docs: + # + # https://docs.cachix.org/continuous-integration-setup/circleci.html + # + # but they seem to have removed it now. + bash -c "comm -13 <(sort /tmp/store-path-pre-build | grep -v '\.drv$') <(nix path-info --all | grep -v '\.drv$' | sort) | cachix push $CACHIX_NAME" + fi typechecks: docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:18.04-py3" + image: "tahoelafsci/ubuntu:18.04-py3.7" steps: - "checkout" @@ -493,7 +494,7 @@ jobs: docs: docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/ubuntu:18.04-py3" + image: "tahoelafsci/ubuntu:18.04-py3.7" steps: - "checkout" @@ -511,16 +512,19 @@ jobs: # https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/ docker: - <<: *DOCKERHUB_AUTH - image: "docker:17.05.0-ce-git" + # CircleCI build images; https://github.com/CircleCI-Public/cimg-base + # for details. + image: "cimg/base:2022.01" environment: - DISTRO: "tahoelafsci/:foo-py2" - TAG: "tahoelafsci/distro:-py2" + DISTRO: "tahoelafsci/:foo-py3.9" + TAG: "tahoelafsci/distro:-py3.9" PYTHON_VERSION: "tahoelafsci/distro:tag-py`_ and the `Contributor Code of Conduct <../docs/CODE_OF_CONDUCT.md>`_. + + +🥳 First Contribution? 
+====================== + +If you are committing to Tahoe for the very first time, consider adding your name to our contributor list in `CREDITS <../CREDITS>`__ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 45b2986a3..163266613 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,33 @@ on: - "master" pull_request: +# At the start of each workflow run, GitHub creates a unique +# GITHUB_TOKEN secret to use in the workflow. It is a good idea for +# this GITHUB_TOKEN to have the minimum of permissions. See: +# +# - https://docs.github.com/en/actions/security-guides/automatic-token-authentication +# - https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions +# +permissions: + contents: read + +# Control to what degree jobs in this workflow will run concurrently with +# other instances of themselves. +# +# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#concurrency +concurrency: + # We want every revision on master to run the workflow completely. + # "head_ref" is not set for the "push" event but it is set for the + # "pull_request" event. If it is set then it is the name of the branch and + # we can use it to make sure each branch has only one active workflow at a + # time. If it is not set then we can compute a unique string that gives + # every master/push workflow its own group. + group: "${{ github.head_ref || format('{0}-{1}', github.run_number, github.run_attempt) }}" + + # Then, we say that if a new workflow wants to start in the same group as a + # running workflow, the running workflow should be cancelled. + cancel-in-progress: true + env: # Tell Hypothesis which configuration we want it to use. TAHOE_LAFS_HYPOTHESIS_PROFILE: "ci" @@ -21,49 +48,35 @@ jobs: - windows-latest - ubuntu-latest python-version: - - 2.7 - - 3.6 - - 3.7 - - 3.8 - - 3.9 + - "3.7" + - "3.8" + - "3.9" + - "3.10" include: - # On macOS don't bother with 3.6-3.8, just to get faster builds. - - os: macos-10.15 - python-version: 2.7 + # On macOS don't bother with 3.7-3.8, just to get faster builds. - os: macos-latest - python-version: 3.9 - + python-version: "3.9" + - os: macos-latest + python-version: "3.10" + # We only support PyPy on Linux at the moment. + - os: ubuntu-latest + python-version: "pypy-3.7" + - os: ubuntu-latest + python-version: "pypy-3.8" + steps: # See https://github.com/actions/checkout. A fetch-depth of 0 # fetches all tags and branches. - name: Check out Tahoe-LAFS sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - # To use pip caching with GitHub Actions in an OS-independent - # manner, we need `pip cache dir` command, which became - # available since pip v20.1+. At the time of writing this, - # GitHub Actions offers pip v20.3.3 for both ubuntu-latest and - # windows-latest, and pip v20.3.1 for macos-latest. 
- - name: Get pip cache directory - id: pip-cache - run: | - echo "::set-output name=dir::$(pip cache dir)" - - # See https://github.com/actions/cache - - name: Use pip cache - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} - restore-keys: | - ${{ runner.os }}-pip- + cache: 'pip' # caching pip dependencies - name: Install Python packages run: | @@ -77,13 +90,13 @@ jobs: run: python -m tox - name: Upload eliot.log - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: eliot.log path: eliot.log - name: Upload trial log - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 with: name: test.log path: _trial_temp/test.log @@ -92,25 +105,6 @@ jobs: # Action for this, as of Jan 2021 it does not support Python coverage # files - only lcov files. Therefore, we use coveralls-python, the # coveralls.io-supplied Python reporter, for this. - # - # It is coveralls-python 1.x that has maintained compatibility - # with Python 2, while coveralls-python 3.x is compatible with - # Python 3. Sadly we can't use them both in the same workflow. - # - # The two versions of coveralls-python are somewhat mutually - # incompatible. Mixing these two different versions when - # reporting coverage to coveralls.io will lead to grief, since - # they get job IDs in different fashion. If we use both - # versions of coveralls in the same workflow, the finalizing - # step will be able to mark only part of the jobs as done, and - # the other part will be left hanging, never marked as done: it - # does not matter if we make an API call or `coveralls --finish` - # to indicate that CI has finished running. - # - # So we try to use the newer coveralls-python that is available - # via Python 3 (which is present in GitHub Actions tool cache, - # even when we're running Python 2.7 tests) throughout this - # workflow. - name: "Report Coverage to Coveralls" run: | pip3 install --upgrade coveralls==3.0.1 @@ -161,22 +155,21 @@ jobs: matrix: os: - windows-latest - - ubuntu-latest + # 22.04 has some issue with Tor at the moment: + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943 + - ubuntu-20.04 python-version: - - 2.7 - - 3.6 + - 3.7 - 3.9 include: - # On macOS don't bother with 3.6, just to get faster builds. - - os: macos-10.15 - python-version: 2.7 + # On macOS don't bother with 3.7, just to get faster builds. - os: macos-latest python-version: 3.9 steps: - name: Install Tor [Ubuntu] - if: matrix.os == 'ubuntu-latest' + if: ${{ contains(matrix.os, 'ubuntu') }} run: sudo apt install tor # TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744. 
@@ -185,38 +178,24 @@ jobs: - name: Install Tor [macOS, ${{ matrix.python-version }} ] if: ${{ contains(matrix.os, 'macos') }} run: | - brew extract --version 0.4.5.8 tor homebrew/cask - brew install tor@0.4.5.8 - brew link --overwrite tor@0.4.5.8 + brew install tor - name: Install Tor [Windows] if: matrix.os == 'windows-latest' - uses: crazy-max/ghaction-chocolatey@v1 + uses: crazy-max/ghaction-chocolatey@v2 with: args: install tor - name: Check out Tahoe-LAFS sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - - name: Get pip cache directory - id: pip-cache - run: | - echo "::set-output name=dir::$(pip cache dir)" - - - name: Use pip cache - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} - restore-keys: | - ${{ runner.os }}-pip- + cache: 'pip' # caching pip dependencies - name: Install Python packages run: | @@ -226,16 +205,16 @@ jobs: - name: Display tool versions run: python misc/build_helpers/show-tool-versions.py - - name: Run "Python 2 integration tests" - if: ${{ matrix.python-version == '2.7' }} + - name: Run "Python 3 integration tests" + env: + # On macOS this is necessary to ensure unix socket paths for tor + # aren't too long. On Windows tox won't pass it through so it has no + # effect. On Linux it doesn't make a difference one way or another. + TMPDIR: "/tmp" run: tox -e integration - - name: Run "Python 3 integration tests" - if: ${{ matrix.python-version != '2.7' }} - run: tox -e integration3 - - name: Upload eliot.log in case of failure - uses: actions/upload-artifact@v1 + uses: actions/upload-artifact@v3 if: failure() with: name: integration.eliot.json @@ -251,32 +230,20 @@ jobs: - windows-latest - ubuntu-latest python-version: - - 2.7 + - 3.9 steps: - name: Check out Tahoe-LAFS sources - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - - name: Get pip cache directory - id: pip-cache - run: | - echo "::set-output name=dir::$(pip cache dir)" - - - name: Use pip cache - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} - restore-keys: | - ${{ runner.os }}-pip- + cache: 'pip' # caching pip dependencies - name: Install Python packages run: | @@ -294,7 +261,7 @@ jobs: run: dist/Tahoe-LAFS/tahoe --version - name: Upload PyInstaller package - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: Tahoe-LAFS-${{ matrix.os }}-Python-${{ matrix.python-version }} path: dist/Tahoe-LAFS-*-*.* diff --git a/.gitignore b/.gitignore index d6a58b88b..7c7fa2afd 100644 --- a/.gitignore +++ b/.gitignore @@ -29,8 +29,7 @@ zope.interface-*.egg .pc /src/allmydata/test/plugins/dropin.cache -/_trial_temp* -/_test_memory/ +**/_trial_temp* /tmp* /*.patch /dist/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..65b390f26 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,5 @@ +version: 2 + +python: + install: + - requirements: docs/requirements.txt diff --git a/CLASSIFIERS.txt b/CLASSIFIERS.txt deleted file mode 100644 index 7aa2e35b1..000000000 --- a/CLASSIFIERS.txt +++ 
/dev/null @@ -1,29 +0,0 @@ -Development Status :: 5 - Production/Stable -Environment :: Console -Environment :: Web Environment -License :: OSI Approved :: GNU General Public License (GPL) -License :: DFSG approved -License :: Other/Proprietary License -Intended Audience :: Developers -Intended Audience :: End Users/Desktop -Intended Audience :: System Administrators -Operating System :: Microsoft -Operating System :: Microsoft :: Windows -Operating System :: Unix -Operating System :: POSIX :: Linux -Operating System :: POSIX -Operating System :: MacOS :: MacOS X -Operating System :: OS Independent -Natural Language :: English -Programming Language :: C -Programming Language :: Python -Programming Language :: Python :: 2 -Programming Language :: Python :: 2.7 -Topic :: Utilities -Topic :: System :: Systems Administration -Topic :: System :: Filesystems -Topic :: System :: Distributed Computing -Topic :: Software Development :: Libraries -Topic :: System :: Archiving :: Backup -Topic :: System :: Archiving :: Mirroring -Topic :: System :: Archiving diff --git a/CREDITS b/CREDITS index b0923fc35..89e1468aa 100644 --- a/CREDITS +++ b/CREDITS @@ -240,3 +240,27 @@ N: Lukas Pirl E: tahoe@lukas-pirl.de W: http://lukas-pirl.de D: Buildslaves (Debian, Fedora, CentOS; 2016-2021) + +N: Anxhelo Lushka +E: anxhelo1995@gmail.com +D: Web site design and updates + +N: Fon E. Noel +E: fenn25.fn@gmail.com +D: bug-fixes and refactoring + +N: Jehad Baeth +E: jehad@leastauthority.com +D: Documentation improvement + +N: May-Lee Sia +E: mayleesia@gmail.com +D: Community-manager and documentation improvements + +N: Yash Nayani +E: yashaswi.nram@gmail.com +D: Installation Guide improvements + +N: Florian Sesser +E: florian@private.storage +D: OpenMetrics support \ No newline at end of file diff --git a/Makefile b/Makefile index f7a357588..c02184a36 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ PYTHON=python export PYTHON PYFLAKES=flake8 export PYFLAKES -VIRTUAL_ENV=./.tox/py27 +VIRTUAL_ENV=./.tox/py37 SOURCES=src/allmydata static misc setup.py APPNAME=tahoe-lafs TEST_SUITE=allmydata @@ -35,7 +35,7 @@ test: .tox/create-venvs.log # Run codechecks first since it takes the least time to report issues early. tox --develop -e codechecks # Run all the test environments in parallel to reduce run-time - tox --develop -p auto -e 'py27,py36,pypy27' + tox --develop -p auto -e 'py37' .PHONY: test-venv-coverage ## Run all tests with coverage collection and reporting. test-venv-coverage: @@ -51,7 +51,7 @@ test-venv-coverage: .PHONY: test-py3-all ## Run all tests under Python 3 test-py3-all: .tox/create-venvs.log - tox --develop -e py36 allmydata + tox --develop -e py37 allmydata # This is necessary only if you want to automatically produce a new # _version.py file from the current git history (without doing a build). @@ -136,37 +136,12 @@ count-lines: # Here is a list of testing tools that can be run with 'python' from a # virtualenv in which Tahoe has been installed. There used to be Makefile # targets for each, but the exact path to a suitable python is now up to the -# developer. But as a hint, after running 'tox', ./.tox/py27/bin/python will +# developer. But as a hint, after running 'tox', ./.tox/py37/bin/python will # probably work. # src/allmydata/test/bench_dirnode.py -# The check-speed and check-grid targets are disabled, since they depend upon -# the pre-located $(TAHOE) executable that was removed when we switched to -# tox. They will eventually be resurrected as dedicated tox environments. 
- -# The check-speed target uses a pre-established client node to run a canned -# set of performance tests against a test network that is also -# pre-established (probably on a remote machine). Provide it with the path to -# a local directory where this client node has been created (and populated -# with the necessary FURLs of the test network). This target will start that -# client with the current code and then run the tests. Afterwards it will -# stop the client. -# -# The 'sleep 5' is in there to give the new client a chance to connect to its -# storageservers, since check_speed.py has no good way of doing that itself. - -##.PHONY: check-speed -##check-speed: .built -## if [ -z '$(TESTCLIENTDIR)' ]; then exit 1; fi -## @echo "stopping any leftover client code" -## -$(TAHOE) stop $(TESTCLIENTDIR) -## $(TAHOE) start $(TESTCLIENTDIR) -## sleep 5 -## $(TAHOE) @src/allmydata/test/check_speed.py $(TESTCLIENTDIR) -## $(TAHOE) stop $(TESTCLIENTDIR) - # The check-grid target also uses a pre-established client node, along with a # long-term directory that contains some well-known files. See the docstring # in src/allmydata/test/check_grid.py to see how to set this up. @@ -195,12 +170,11 @@ test-clean: # Use 'make distclean' instead to delete all generated files. .PHONY: clean clean: - rm -rf build _trial_temp _test_memory .built + rm -rf build _trial_temp .built rm -f `find src *.egg -name '*.so' -or -name '*.pyc'` rm -rf support dist rm -rf `ls -d *.egg | grep -vEe"setuptools-|setuptools_darcs-|darcsver-"` rm -rf *.pyc - rm -f bin/tahoe bin/tahoe.pyscript rm -f *.pkg .PHONY: distclean @@ -250,3 +224,62 @@ src/allmydata/_version.py: .tox/create-venvs.log: tox.ini setup.py tox --notest -p all | tee -a "$(@)" + + +# to make a new release: +# - create a ticket for the release in Trac +# - ensure local copy is up-to-date +# - create a branch like "XXXX.release" from up-to-date master +# - in the branch, run "make release" +# - run "make release-test" +# - perform any other sanity-checks on the release +# - run "make release-upload" +# Note that several commands below hard-code "meejah"; if you are +# someone else please adjust them. +release: + @echo "Is checkout clean?" + git diff-files --quiet + git diff-index --quiet --cached HEAD -- + + @echo "Clean docs build area" + rm -rf docs/_build/ + + @echo "Install required build software" + python3 -m pip install --editable .[build] + + @echo "Test README" + python3 setup.py check -r -s + + @echo "Update NEWS" + python3 -m towncrier build --yes --version `python3 misc/build_helpers/update-version.py --no-tag` + git add -u + git commit -m "update NEWS for release" + +# note that this always bumps the "middle" number, e.g. 
from 1.17.1 -> 1.18.0 +# and produces a tag into the Git repository + @echo "Bump version and create tag" + python3 misc/build_helpers/update-version.py + + @echo "Build and sign wheel" + python3 setup.py bdist_wheel + gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl + ls dist/*`git describe | cut -b 12-`* + + @echo "Build and sign source-dist" + python3 setup.py sdist + gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz + ls dist/*`git describe | cut -b 12-`* + +# basically just a bare-minimum smoke-test that it installs and runs +release-test: + gpg --verify dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc + gpg --verify dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc + virtualenv testmf_venv + testmf_venv/bin/pip install dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl + testmf_venv/bin/tahoe --version + rm -rf testmf_venv + +release-upload: + scp dist/*`git describe | cut -b 12-`* meejah@tahoe-lafs.org:/home/source/downloads + git push origin_push tahoe-lafs-`git describe | cut -b 12-` + twine upload dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc diff --git a/NEWS.rst b/NEWS.rst index 1cfc726ae..7b1fadb8a 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -5,6 +5,236 @@ User-Visible Changes in Tahoe-LAFS ================================== .. towncrier start line +Release 1.18.0 (2022-10-02) +''''''''''''''''''''''''''' + +Backwards Incompatible Changes +------------------------------ + +- Python 3.6 is no longer supported, as it has reached end-of-life and is no longer receiving security updates. (`#3865 `_) +- Python 3.7 or later is now required; Python 2 is no longer supported. (`#3873 `_) +- Share corruption reports stored on disk are now always encoded in UTF-8. (`#3879 `_) +- Record both the PID and the process creation-time: + + a new kind of pidfile in `running.process` records both + the PID and the creation-time of the process. This facilitates + automatic discovery of a "stale" pidfile that points to a + currently-running process. If the recorded creation-time matches + the creation-time of the running process, then it is a still-running + `tahoe run` process. Otherwise, the file is stale. + + The `twistd.pid` file is no longer present. (`#3926 `_) + + +Features +-------- + +- The implementation of SDMF and MDMF (mutables) now requires RSA keys to be exactly 2048 bits, aligning them with the specification. + + Some code existed to allow tests to shorten this and it's + conceptually possible a modified client produced mutables + with different key-sizes. However, the spec says that they + must be 2048 bits. If you happen to have a capability with + a key-size different from 2048 you may use 1.17.1 or earlier + to read the content. 
(`#3828 `_) +- "make" based release automation (`#3846 `_) + + +Misc/Other +---------- + +- `#3327 `_, `#3526 `_, `#3697 `_, `#3709 `_, `#3786 `_, `#3788 `_, `#3802 `_, `#3816 `_, `#3855 `_, `#3858 `_, `#3859 `_, `#3860 `_, `#3867 `_, `#3868 `_, `#3871 `_, `#3872 `_, `#3875 `_, `#3876 `_, `#3877 `_, `#3881 `_, `#3882 `_, `#3883 `_, `#3889 `_, `#3890 `_, `#3891 `_, `#3893 `_, `#3895 `_, `#3896 `_, `#3898 `_, `#3900 `_, `#3909 `_, `#3913 `_, `#3915 `_, `#3916 `_ + + +Release 1.17.1 (2022-01-07) +''''''''''''''''''''''''''' + +Bug Fixes +--------- + +- Fixed regression on Python 3 causing the JSON version of the Welcome page to sometimes produce a 500 error (`#3852 `_) +- Fixed regression on Python 3 where JSON HTTP POSTs failed to be processed. (`#3854 `_) + + +Misc/Other +---------- + +- `#3848 `_, `#3849 `_, `#3850 `_, `#3856 `_ + + +Release 1.17.0 (2021-12-06) +''''''''''''''''''''''''''' + +Security-related Changes +------------------------ + +- The introducer server no longer writes the sensitive introducer fURL value to its log at startup time. Instead it writes the well-known path of the file from which this value can be read. (`#3819 `_) +- The storage protocol operation ``add_lease`` now safely rejects an attempt to add a 4,294,967,296th lease to an immutable share. + Previously this failed with an error after recording the new lease in the share file, resulting in the share file losing track of a one previous lease. (`#3821 `_) +- The storage protocol operation ``readv`` now safely rejects attempts to read negative lengths. + Previously these read requests were satisfied with the complete contents of the share file (including trailing metadata) starting from the specified offset. (`#3822 `_) +- The storage server implementation now respects the ``reserved_space`` configuration value when writing lease information and recording corruption advisories. + Previously, new leases could be created and written to disk even when the storage server had less remaining space than the configured reserve space value. + Now this operation will fail with an exception and the lease will not be created. + Similarly, if there is no space available, corruption advisories will be logged but not written to disk. (`#3823 `_) +- The storage server implementation no longer records corruption advisories about storage indexes for which it holds no shares. (`#3824 `_) +- The lease-checker now uses JSON instead of pickle to serialize its state. + + tahoe will now refuse to run until you either delete all pickle files or + migrate them using the new command:: + + tahoe admin migrate-crawler + + This will migrate all crawler-related pickle files. (`#3825 `_) +- The SFTP server no longer accepts password-based credentials for authentication. + Public/private key-based credentials are now the only supported authentication type. + This removes plaintext password storage from the SFTP credentials file. + It also removes a possible timing side-channel vulnerability which might have allowed attackers to discover an account's plaintext password. (`#3827 `_) +- The storage server now keeps hashes of lease renew and cancel secrets for immutable share files instead of keeping the original secrets. (`#3839 `_) +- The storage server now keeps hashes of lease renew and cancel secrets for mutable share files instead of keeping the original secrets. 
(`#3841 `_) + + +Features +-------- + +- Tahoe-LAFS releases now have just a .tar.gz source release and a (universal) wheel (`#3735 `_) +- tahoe-lafs now provides its statistics also in OpenMetrics format (for Prometheus et. al.) at `/statistics?t=openmetrics`. (`#3786 `_) +- If uploading an immutable hasn't had a write for 30 minutes, the storage server will abort the upload. (`#3807 `_) + + +Bug Fixes +--------- + +- When uploading an immutable, overlapping writes that include conflicting data are rejected. In practice, this likely didn't happen in real-world usage. (`#3801 `_) + + +Dependency/Installation Changes +------------------------------- + +- Tahoe-LAFS now supports running on NixOS 21.05 with Python 3. (`#3808 `_) + + +Documentation Changes +--------------------- + +- The news file for future releases will include a section for changes with a security impact. (`#3815 `_) + + +Removed Features +---------------- + +- The little-used "control port" has been removed from all node types. (`#3814 `_) + + +Other Changes +------------- + +- Tahoe-LAFS no longer runs its Tor integration test suite on Python 2 due to the increased complexity of obtaining compatible versions of necessary dependencies. (`#3837 `_) + + +Misc/Other +---------- + +- `#3525 `_, `#3527 `_, `#3754 `_, `#3758 `_, `#3784 `_, `#3792 `_, `#3793 `_, `#3795 `_, `#3797 `_, `#3798 `_, `#3799 `_, `#3800 `_, `#3805 `_, `#3806 `_, `#3810 `_, `#3812 `_, `#3820 `_, `#3829 `_, `#3830 `_, `#3831 `_, `#3832 `_, `#3833 `_, `#3834 `_, `#3835 `_, `#3836 `_, `#3838 `_, `#3842 `_, `#3843 `_, `#3847 `_ + + +Release 1.16.0 (2021-09-17) +''''''''''''''''''''''''''' + +Backwards Incompatible Changes +------------------------------ + +- The Tahoe command line now always uses UTF-8 to decode its arguments, regardless of locale. (`#3588 `_) +- tahoe backup's --exclude-from has been renamed to --exclude-from-utf-8, and correspondingly requires the file to be UTF-8 encoded. (`#3716 `_) + + +Features +-------- + +- Added 'typechecks' environment for tox running mypy and performing static typechecks. (`#3399 `_) +- The NixOS-packaged Tahoe-LAFS now knows its own version. (`#3629 `_) + + +Bug Fixes +--------- + +- Fix regression that broke flogtool results on Python 2. (`#3509 `_) +- Fix a logging regression on Python 2 involving unicode strings. (`#3510 `_) +- Certain implementation-internal weakref KeyErrors are now handled and should no longer cause user-initiated operations to fail. (`#3539 `_) +- SFTP public key auth likely works more consistently, and SFTP in general was previously broken. (`#3584 `_) +- Fixed issue where redirecting old-style URIs (/uri/?uri=...) didn't work. (`#3590 `_) +- ``tahoe invite`` will now read share encoding/placement configuration values from a Tahoe client node configuration file if they are not given on the command line, instead of raising an unhandled exception. (`#3650 `_) +- Fix regression where uploading files with non-ASCII names failed. (`#3738 `_) +- Fixed annoying UnicodeWarning message on Python 2 when running CLI tools. (`#3739 `_) +- Fixed bug where share corruption events were not logged on storage servers running on Windows. (`#3779 `_) + + +Dependency/Installation Changes +------------------------------- + +- Tahoe-LAFS now requires Twisted 19.10.0 or newer. As a result, it now has a transitive dependency on bcrypt. (`#1549 `_) +- Debian 8 support has been replaced with Debian 10 support. (`#3326 `_) +- Tahoe-LAFS no longer depends on Nevow. 
(`#3433 `_) +- Tahoe-LAFS now requires the `netifaces` Python package and no longer requires the external `ip`, `ifconfig`, or `route.exe` executables. (`#3486 `_) +- The Tahoe-LAFS project no longer commits to maintaining binary packages for all dependencies at . Please use PyPI instead. (`#3497 `_) +- Tahoe-LAFS now uses a forked version of txi2p (named txi2p-tahoe) with Python 3 support. (`#3633 `_) +- The Nix package now includes correct version information. (`#3712 `_) +- Use netifaces 0.11.0 wheel package from PyPI.org if you use 64-bit Python 2.7 on Windows. VCPython27 downloads are no longer available at Microsoft's website, which has made building Python 2.7 wheel packages of Python libraries with C extensions (such as netifaces) on Windows difficult. (`#3733 `_) + + +Configuration Changes +--------------------- + +- The ``[client]introducer.furl`` configuration item is now deprecated in favor of the ``private/introducers.yaml`` file. (`#3504 `_) + + +Documentation Changes +--------------------- + +- Documentation now has its own towncrier category. (`#3664 `_) +- `tox -e docs` will treat warnings about docs as errors. (`#3666 `_) +- The visibility of the Tahoe-LAFS logo has been improved for "dark" themed viewing. (`#3677 `_) +- A cheatsheet-style document for contributors was created at CONTRIBUTORS.rst (`#3682 `_) +- Our IRC channel, #tahoe-lafs, has been moved to irc.libera.chat. (`#3721 `_) +- Tahoe-LAFS project is now registered with Libera.Chat IRC network. (`#3726 `_) +- Rewriting the installation guide for Tahoe-LAFS. (`#3747 `_) +- Documentation and installation links in the README have been fixed. (`#3749 `_) +- The Great Black Swamp proposed specification now includes sample interactions to demonstrate expected usage patterns. (`#3764 `_) +- The Great Black Swamp proposed specification now includes a glossary. (`#3765 `_) +- The Great Black Swamp specification now allows parallel upload of immutable share data. (`#3769 `_) +- There is now a specification for the scheme which Tahoe-LAFS storage clients use to derive their lease renewal secrets. (`#3774 `_) +- The Great Black Swamp proposed specification now has a simplified interface for reading data from immutable shares. (`#3777 `_) +- tahoe-dev mailing list is now at tahoe-dev@lists.tahoe-lafs.org. (`#3782 `_) +- The Great Black Swamp specification now describes the required authorization scheme. (`#3785 `_) +- The "Great Black Swamp" proposed specification has been expanded to include two lease management APIs. (`#3037 `_) +- The specification section of the Tahoe-LAFS documentation now includes explicit discussion of the security properties of Foolscap "fURLs" on which it depends. (`#3503 `_) +- The README, revised by Viktoriia with feedback from the team, is now more focused on the developer community and provides more information about Tahoe-LAFS, why it's important, and how someone can use it or start contributing to it. (`#3545 `_) +- The "Great Black Swamp" proposed specification has been changed use ``v=1`` as the URL version identifier. (`#3644 `_) +- You can run `make livehtml` in docs directory to invoke sphinx-autobuild. (`#3663 `_) + + +Removed Features +---------------- + +- Announcements delivered through the introducer system are no longer automatically annotated with copious information about the Tahoe-LAFS software version nor the versions of its dependencies. (`#3518 `_) +- The stats gatherer, broken since at least Tahoe-LAFS 1.13.0, has been removed. 
The ``[client]stats_gatherer.furl`` configuration item in ``tahoe.cfg`` is no longer allowed. The Tahoe-LAFS project recommends using a third-party metrics aggregation tool instead. (`#3549 `_) +- The deprecated ``tahoe`` start, restart, stop, and daemonize sub-commands have been removed. (`#3550 `_) +- FTP is no longer supported by Tahoe-LAFS. Please use the SFTP support instead. (`#3583 `_) +- Removed support for the Account Server frontend authentication type. (`#3652 `_) + + +Other Changes +------------- + +- Refactored test_introducer in web tests to use custom base test cases (`#3757 `_) + + +Misc/Other +---------- + +- `#2928 `_, `#3283 `_, `#3314 `_, `#3384 `_, `#3385 `_, `#3390 `_, `#3404 `_, `#3428 `_, `#3432 `_, `#3434 `_, `#3435 `_, `#3454 `_, `#3459 `_, `#3460 `_, `#3465 `_, `#3466 `_, `#3467 `_, `#3468 `_, `#3470 `_, `#3471 `_, `#3472 `_, `#3473 `_, `#3474 `_, `#3475 `_, `#3477 `_, `#3478 `_, `#3479 `_, `#3481 `_, `#3482 `_, `#3483 `_, `#3485 `_, `#3488 `_, `#3490 `_, `#3491 `_, `#3492 `_, `#3493 `_, `#3496 `_, `#3499 `_, `#3500 `_, `#3501 `_, `#3502 `_, `#3511 `_, `#3513 `_, `#3514 `_, `#3515 `_, `#3517 `_, `#3520 `_, `#3521 `_, `#3522 `_, `#3523 `_, `#3524 `_, `#3528 `_, `#3529 `_, `#3532 `_, `#3533 `_, `#3534 `_, `#3536 `_, `#3537 `_, `#3542 `_, `#3544 `_, `#3546 `_, `#3547 `_, `#3551 `_, `#3552 `_, `#3553 `_, `#3555 `_, `#3557 `_, `#3558 `_, `#3560 `_, `#3563 `_, `#3564 `_, `#3565 `_, `#3566 `_, `#3567 `_, `#3568 `_, `#3572 `_, `#3574 `_, `#3575 `_, `#3576 `_, `#3577 `_, `#3578 `_, `#3579 `_, `#3580 `_, `#3582 `_, `#3587 `_, `#3588 `_, `#3589 `_, `#3591 `_, `#3592 `_, `#3593 `_, `#3594 `_, `#3595 `_, `#3596 `_, `#3599 `_, `#3600 `_, `#3603 `_, `#3605 `_, `#3606 `_, `#3607 `_, `#3608 `_, `#3611 `_, `#3612 `_, `#3613 `_, `#3615 `_, `#3616 `_, `#3617 `_, `#3618 `_, `#3619 `_, `#3620 `_, `#3621 `_, `#3623 `_, `#3624 `_, `#3625 `_, `#3626 `_, `#3628 `_, `#3630 `_, `#3631 `_, `#3632 `_, `#3634 `_, `#3635 `_, `#3637 `_, `#3638 `_, `#3640 `_, `#3642 `_, `#3645 `_, `#3646 `_, `#3647 `_, `#3648 `_, `#3649 `_, `#3651 `_, `#3653 `_, `#3654 `_, `#3655 `_, `#3656 `_, `#3657 `_, `#3658 `_, `#3662 `_, `#3667 `_, `#3669 `_, `#3670 `_, `#3671 `_, `#3672 `_, `#3674 `_, `#3675 `_, `#3676 `_, `#3678 `_, `#3679 `_, `#3681 `_, `#3683 `_, `#3686 `_, `#3687 `_, `#3691 `_, `#3692 `_, `#3699 `_, `#3700 `_, `#3701 `_, `#3702 `_, `#3703 `_, `#3704 `_, `#3705 `_, `#3707 `_, `#3708 `_, `#3709 `_, `#3711 `_, `#3713 `_, `#3714 `_, `#3715 `_, `#3717 `_, `#3718 `_, `#3722 `_, `#3723 `_, `#3727 `_, `#3728 `_, `#3729 `_, `#3730 `_, `#3731 `_, `#3732 `_, `#3734 `_, `#3735 `_, `#3736 `_, `#3741 `_, `#3743 `_, `#3744 `_, `#3745 `_, `#3746 `_, `#3751 `_, `#3759 `_, `#3760 `_, `#3763 `_, `#3773 `_, `#3781 `_ + + Release 1.15.1 '''''''''''''' diff --git a/README.rst b/README.rst index 705ed11bb..317378fae 100644 --- a/README.rst +++ b/README.rst @@ -53,12 +53,11 @@ For more detailed instructions, read `Installing Tahoe-LAFS `__ to learn how to set up your first Tahoe-LAFS node. -🐍 Python 3 Support --------------------- +🐍 Python 2 +----------- -Python 3 support has been introduced starting with Tahoe-LAFS 1.16.0, alongside Python 2. -System administrators are advised to start running Tahoe on Python 3 and should expect Python 2 support to be dropped in a future version. -Please, feel free to file issues if you run into bugs while running Tahoe on Python 3. +Python 3.7 or later is now required. +If you are still using Python 2.7, use Tahoe-LAFS version 1.17.1. 
 🤖 Issues
@@ -95,7 +94,14 @@ As a community-driven open source project, Tahoe-LAFS welcomes contributions of

 - `Patch reviews `__

-Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard `__ and the `Contributor Code of Conduct `__.
+Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard `__ and the `Contributor Code of Conduct `__.
+
+
+🥳 First Contribution?
+----------------------
+
+If you are committing to Tahoe for the very first time, it's required that you add your name to our contributor list in `CREDITS `__. Please ensure that this addition has its own commit within your first contribution.
+

 🤝 Supporters
 --------------
diff --git a/default.nix b/default.nix
new file mode 100644
index 000000000..5f4db2c78
--- /dev/null
+++ b/default.nix
@@ -0,0 +1,102 @@
+let
+  # sources.nix contains information about which versions of some of our
+  # dependencies we should use. since we use it to pin nixpkgs and the PyPI
+  # package database, roughly all the rest of our dependencies are *also*
+  # pinned - indirectly.
+  #
+  # sources.nix is managed using a tool called `niv`. as an example, to
+  # update to the most recent version of nixpkgs from the 21.11 maintenance
+  # release, in the top-level tahoe-lafs checkout directory you run:
+  #
+  #   niv update nixpkgs-21.11
+  #
+  # or, to update the PyPI package database -- which is necessary to make any
+  # newly released packages visible -- you likewise run:
+  #
+  #   niv update pypi-deps-db
+  #
+  # niv also supports choosing a specific revision, following a different
+  # branch, etc. find complete documentation for the tool at
+  # https://github.com/nmattia/niv
+  sources = import nix/sources.nix;
+in
+{
+  pkgsVersion ? "nixpkgs-21.11" # a string which chooses a nixpkgs from the
+                                # niv-managed sources data
+
+, pkgs ? import sources.${pkgsVersion} { } # nixpkgs itself
+
+, pypiData ? sources.pypi-deps-db # the pypi package database snapshot to use
+                                  # for dependency resolution
+
+, pythonVersion ? "python37" # a string choosing the python derivation from
+                             # nixpkgs to target
+
+, extras ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
+                           # the dependencies of which the resulting package
+                           # will also depend on. Include all of the runtime
+                           # extras by default because the incremental cost of
+                           # including them is a lot smaller than the cost of
+                           # re-building the whole thing to add them.
+
+, mach-nix ? import sources.mach-nix { # the mach-nix package to use to build
+                                       # the tahoe-lafs package
+  inherit pkgs pypiData;
+  python = pythonVersion;
+}
+}:
+# The project name, version, and most other metadata are automatically
+# extracted from the source. Some requirements are not properly extracted
+# and those cases are handled below. The version can only be extracted if
+# `setup.py update_version` has been run (this is not at all ideal but it
+# seems difficult to fix) - so for now just be sure to run that first.
+mach-nix.buildPythonPackage rec {
+  # Define the location of the Tahoe-LAFS source to be packaged. Clean up
+  # as many of the non-source files (eg the `.git` directory, `~` backup
+  # files, nix's own `result` symlink, etc) as possible to avoid needing to
+  # re-build when files that make no difference to the package have changed.
+  src = pkgs.lib.cleanSource ./.;
+
+  # Select whichever package extras were requested.
+ inherit extras; + + # Define some extra requirements that mach-nix does not automatically detect + # from inspection of the source. We typically don't need to put version + # constraints on any of these requirements. The pypi-deps-db we're + # operating with makes dependency resolution deterministic so as long as it + # works once it will always work. It could be that in the future we update + # pypi-deps-db and an incompatibility arises - in which case it would make + # sense to apply some version constraints here. + requirementsExtra = '' + # mach-nix does not yet support pyproject.toml which means it misses any + # build-time requirements of our dependencies which are declared in such a + # file. Tell it about them here. + setuptools_rust + + # mach-nix does not yet parse environment markers (e.g. "python > '3.0'") + # correctly. It misses all of our requirements which have an environment marker. + # Duplicate them here. + foolscap + eliot + pyrsistent + collections-extended + ''; + + # Specify where mach-nix should find packages for our Python dependencies. + # There are some reasonable defaults so we only need to specify certain + # packages where the default configuration runs into some issue. + providers = { + }; + + # Define certain overrides to the way Python dependencies are built. + _ = { + # Remove a click-default-group patch for a test suite problem which no + # longer applies because the project apparently no longer has a test suite + # in its source distribution. + click-default-group.patches = []; + }; + + passthru.meta.mach-nix = { + inherit providers _; + }; +} diff --git a/docs/Installation/install-tahoe.rst b/docs/Installation/install-tahoe.rst index 2fe47f4a8..8ceca2e01 100644 --- a/docs/Installation/install-tahoe.rst +++ b/docs/Installation/install-tahoe.rst @@ -28,15 +28,15 @@ To install Tahoe-LAFS on Windows: 3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**. 4. Start PowerShell and enter the following command to verify python installation:: - + python --version 5. Enter the following command to install Tahoe-LAFS:: - + pip install tahoe-lafs 6. Verify installation by checking for the version:: - + tahoe --version If you want to hack on Tahoe's source code, you can install Tahoe in a ``virtualenv`` on your Windows Machine. To learn more, see :doc:`install-on-windows`. @@ -56,13 +56,13 @@ If you are working on MacOS or a Linux distribution which does not have Tahoe-LA * **pip**: Most python installations already include `pip`. However, if your installation does not, see `pip installation `_. 2. Install Tahoe-LAFS using pip:: - + pip install tahoe-lafs 3. Verify installation by checking for the version:: - + tahoe --version -If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`. +If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`. You can always write to the `tahoe-dev mailing list `_ or chat on the `Libera.chat IRC `_ if you are not able to get Tahoe-LAFS up and running on your deployment. 
diff --git a/docs/check_running.py b/docs/check_running.py new file mode 100644 index 000000000..2705f1721 --- /dev/null +++ b/docs/check_running.py @@ -0,0 +1,47 @@ + +import psutil +import filelock + + +def can_spawn_tahoe(pidfile): + """ + Determine if we can spawn a Tahoe-LAFS for the given pidfile. That + pidfile may be deleted if it is stale. + + :param pathlib.Path pidfile: the file to check, that is the Path + to "running.process" in a Tahoe-LAFS configuration directory + + :returns bool: True if we can spawn `tahoe run` here + """ + lockpath = pidfile.parent / (pidfile.name + ".lock") + with filelock.FileLock(lockpath): + try: + with pidfile.open("r") as f: + pid, create_time = f.read().strip().split(" ", 1) + except FileNotFoundError: + return True + + # somewhat interesting: we have a pidfile + pid = int(pid) + create_time = float(create_time) + + try: + proc = psutil.Process(pid) + # most interesting case: there _is_ a process running at the + # recorded PID -- but did it just happen to get that PID, or + # is it the very same one that wrote the file? + if create_time == proc.create_time(): + # _not_ stale! another intance is still running against + # this configuration + return False + + except psutil.NoSuchProcess: + pass + + # the file is stale + pidfile.unlink() + return True + + +from pathlib import Path +print("can spawn?", can_spawn_tahoe(Path("running.process"))) diff --git a/docs/conf.py b/docs/conf.py index af05e5900..cc9a11166 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -63,7 +63,7 @@ release = u'1.x' # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: diff --git a/docs/frontends/FTP-and-SFTP.rst b/docs/frontends/FTP-and-SFTP.rst index 9d4f1dcec..ede719e26 100644 --- a/docs/frontends/FTP-and-SFTP.rst +++ b/docs/frontends/FTP-and-SFTP.rst @@ -47,8 +47,8 @@ servers must be configured with a way to first authenticate a user (confirm that a prospective client has a legitimate claim to whatever authorities we might grant a particular user), and second to decide what directory cap should be used as the root directory for a log-in by the authenticated user. -A username and password can be used; as of Tahoe-LAFS v1.11, RSA or DSA -public key authentication is also supported. +As of Tahoe-LAFS v1.17, +RSA/DSA public key authentication is the only supported mechanism. Tahoe-LAFS provides two mechanisms to perform this user-to-cap mapping. The first (recommended) is a simple flat file with one account per line. @@ -59,20 +59,14 @@ Creating an Account File To use the first form, create a file (for example ``BASEDIR/private/accounts``) in which each non-comment/non-blank line is a space-separated line of -(USERNAME, PASSWORD, ROOTCAP), like so:: +(USERNAME, KEY-TYPE, PUBLIC-KEY, ROOTCAP), like so:: % cat BASEDIR/private/accounts - # This is a password line: username password cap - alice password URI:DIR2:ioej8xmzrwilg772gzj4fhdg7a:wtiizszzz2rgmczv4wl6bqvbv33ag4kvbr6prz3u6w3geixa6m6a - bob sekrit URI:DIR2:6bdmeitystckbl9yqlw7g56f4e:serp5ioqxnh34mlbmzwvkp3odehsyrr7eytt5f64we3k9hhcrcja - # This is a public key line: username keytype pubkey cap # (Tahoe-LAFS v1.11 or later) carol ssh-rsa AAAA... 
URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa
-For public key authentication, the keytype may be either "ssh-rsa" or "ssh-dsa".
-To avoid ambiguity between passwords and public key types, a password cannot
-start with "ssh-".
+The key type may be either "ssh-rsa" or "ssh-dsa".

 Now add an ``accounts.file`` directive to your ``tahoe.cfg`` file, as
 described in the next sections.
diff --git a/docs/gpg-setup.rst b/docs/gpg-setup.rst
new file mode 100644
index 000000000..cb8cbfd20
--- /dev/null
+++ b/docs/gpg-setup.rst
@@ -0,0 +1,18 @@
+Preparing to Authenticate Release (Setting up GPG)
+--------------------------------------------------
+
+In order to keep releases authentic, releases must be signed before being
+published. This ensures that users of Tahoe can verify that the version of Tahoe
+they are using comes from a trusted, or at the very least known, source.
+
+Signing is done with ``GPG``, an implementation of the ``OpenPGP`` standard. To complete
+the release steps you need to install the ``GPG`` software and set up a key (identity).
+
+- `Download `__ and install GPG for your operating system.
+- Generate a key pair using ``gpg --gen-key``. *You will be asked some questions to personalize your key configuration.*
+
+You might take additional steps including:
+
+- Setting up a revocation certificate (in case you lose your secret key)
+- Backing up your key pair
+- Uploading your fingerprint to a keyserver such as `openpgp.org `__
diff --git a/docs/index.rst b/docs/index.rst
index 16067597a..3da03341a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -29,6 +29,7 @@ Contents:
    contributing
    CODE_OF_CONDUCT
    release-checklist
+   gpg-setup
    servers
    helper
diff --git a/docs/proposed/http-storage-node-protocol.rst b/docs/proposed/http-storage-node-protocol.rst
index 521bf476d..aee201cf5 100644
--- a/docs/proposed/http-storage-node-protocol.rst
+++ b/docs/proposed/http-storage-node-protocol.rst
@@ -30,15 +30,15 @@ Glossary
    introducer
        a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers

-   fURL
+   :ref:`fURLs `
        a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
        (the storage service is an example of such an object)

-   NURL
+   :ref:`NURLs `
        a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap

    swissnum
-       a short random string which is part of a fURL and which acts as a shared secret to authorize clients to use a storage service
+       a short random string which is part of a fURL/NURL and which acts as a shared secret to authorize clients to use a storage service

    lease
        state associated with a share informing a storage server of the duration of storage desired by a client
@@ -211,15 +211,15 @@ To further clarify, consider this example.
 Alice operates a storage node.
 Alice generates a key pair and secures it properly.
 Alice generates a self-signed storage node certificate with the key pair.
-Alice's storage node announces (to an introducer) a fURL containing (among other information) the SPKI hash.
+Alice's storage node announces (to an introducer) a NURL containing (among other information) the SPKI hash.
 Imagine the SPKI hash is ``i5xb...``.
-This results in a fURL of ``pb://i5xb...@example.com:443/g3m5...#v=1``.
+This results in a NURL of ``pb://i5xb...@example.com:443/g3m5...#v=1``.
 Bob creates a client node pointed at the same introducer.
Bob's client node receives the announcement from Alice's storage node (indirected through the introducer). -Bob's client node recognizes the fURL as referring to an HTTP-dialect server due to the ``v=1`` fragment. -Bob's client node can now perform a TLS handshake with a server at the address in the fURL location hints +Bob's client node recognizes the NURL as referring to an HTTP-dialect server due to the ``v=1`` fragment. +Bob's client node can now perform a TLS handshake with a server at the address in the NURL location hints (``example.com:443`` in this example). Following the above described validation procedures, Bob's client node can determine whether it has reached Alice's storage node or not. @@ -230,7 +230,7 @@ Additionally, by continuing to interact using TLS, Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**. -Bob's client further inspects the fURL for the *swissnum*. +Bob's client further inspects the NURL for the *swissnum*. When Bob's client issues HTTP requests to Alice's storage node it includes the *swissnum* in its requests. **Storage authorization** has been achieved. @@ -266,8 +266,8 @@ Generation of a new certificate allows for certain non-optimal conditions to be * The ``commonName`` of ``newpb_thingy`` may be changed to a more descriptive value. * A ``notValidAfter`` field with a timestamp in the past may be updated. -Storage nodes will announce a new fURL for this new HTTP-based server. -This fURL will be announced alongside their existing Foolscap-based server's fURL. +Storage nodes will announce a new NURL for this new HTTP-based server. +This NURL will be announced alongside their existing Foolscap-based server's fURL. Such an announcement will resemble this:: { @@ -312,7 +312,7 @@ The follow sequence of events is likely: #. The client uses the information in its cache to open a Foolscap connection to the storage server. Ideally, -the client would not rely on an update from the introducer to give it the GBS fURL for the updated storage server. +the client would not rely on an update from the introducer to give it the GBS NURL for the updated storage server. Therefore, when an updated client connects to a storage server using Foolscap, it should request the server's version information. @@ -350,6 +350,11 @@ Because of the simple types used throughout and the equivalence described in `RFC 7049`_ these examples should be representative regardless of which of these two encodings is chosen. +The one exception is sets. +For CBOR messages, any sequence that is semantically a set (i.e. no repeated values allowed, order doesn't matter, and elements are hashable in Python) should be sent as a set. +Tag 6.258 is used to indicate sets in CBOR; see `the CBOR registry `_ for more details. +Sets will be represented as JSON lists in examples because JSON doesn't support sets. + HTTP Design ~~~~~~~~~~~ @@ -363,17 +368,35 @@ one branch contains all of the share data; another branch contains all of the lease data; etc. -Authorization is required for all endpoints. +An ``Authorization`` header in requests is required for all endpoints. The standard HTTP authorization protocol is used. The authentication *type* used is ``Tahoe-LAFS``. The swissnum from the NURL used to locate the storage service is used as the *credentials*. -If credentials are not presented or the swissnum is not associated with a storage service then no storage processing is performed and the request receives an ``UNAUTHORIZED`` response. 
+If credentials are not presented or the swissnum is not associated with a storage service then no storage processing is performed and the request receives an ``401 UNAUTHORIZED`` response. + +There are also, for some endpoints, secrets sent via ``X-Tahoe-Authorization`` headers. +If these are: + +1. Missing. +2. The wrong length. +3. Not the expected kind of secret. +4. They are otherwise unparseable before they are actually semantically used. + +the server will respond with ``400 BAD REQUEST``. +401 is not used because this isn't an authorization problem, this is a "you sent garbage and should know better" bug. + +If authorization using the secret fails, then a ``401 UNAUTHORIZED`` response should be sent. + +Encoding +~~~~~~~~ + +* ``storage_index`` should be base32 encoded (RFC3548) in URLs. General ~~~~~~~ -``GET /v1/version`` -!!!!!!!!!!!!!!!!!!! +``GET /storage/v1/version`` +!!!!!!!!!!!!!!!!!!!!!!!!!!! Retrieve information about the version of the storage server. Information is returned as an encoded mapping. @@ -386,27 +409,28 @@ For example:: "tolerates-immutable-read-overrun": true, "delete-mutable-shares-with-zero-length-writev": true, "fills-holes-with-zero-bytes": true, - "prevents-read-past-end-of-share-data": true, - "gbs-anonymous-storage-url": "pb://...#v=1" + "prevents-read-past-end-of-share-data": true }, "application-version": "1.13.0" } -``PUT /v1/lease/:storage_index`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +``PUT /storage/v1/lease/:storage_index`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Either renew or create a new lease on the bucket addressed by ``storage_index``. -The details of the lease are encoded in the request body. + +The renew secret and cancellation secret should be included as ``X-Tahoe-Authorization`` headers. For example:: - {"renew-secret": "abcd", "cancel-secret": "efgh"} + X-Tahoe-Authorization: lease-renew-secret + X-Tahoe-Authorization: lease-cancel-secret -If the ``renew-secret`` value matches an existing lease +If the ``lease-renew-secret`` value matches an existing lease then the expiration time of that lease will be changed to 31 days after the time of this operation. If it does not match an existing lease -then a new lease will be created with this ``renew-secret`` which expires 31 days after the time of this operation. +then a new lease will be created with this ``lease-renew-secret`` which expires 31 days after the time of this operation. -``renew-secret`` and ``cancel-secret`` values must be 32 bytes long. +``lease-renew-secret`` and ``lease-cancel-secret`` values must be 32 bytes long. The server treats them as opaque values. :ref:`Share Leases` gives details about how the Tahoe-LAFS storage client constructs these values. @@ -423,8 +447,10 @@ In these cases the server takes no action and returns ``NOT FOUND``. Discussion `````````` -We considered an alternative where ``renew-secret`` and ``cancel-secret`` are placed in query arguments on the request path. -We chose to put these values into the request body to make the URL simpler. +We considered an alternative where ``lease-renew-secret`` and ``lease-cancel-secret`` are placed in query arguments on the request path. +This increases chances of leaking secrets in logs. +Putting the secrets in the body reduces the chances of leaking secrets, +but eventually we chose headers as the least likely information to be logged. Several behaviors here are blindly copied from the Foolscap-based storage server protocol. 
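To make the header-based scheme concrete, the following is a minimal, non-normative sketch (using only the Python standard library) of a client issuing this lease request. The host, port, and swissnum are assumed to have already been extracted from the server's NURL, and the way the 32-byte secrets are serialized into header values is an assumption of the sketch, not something this document pins down::

    from http.client import HTTPSConnection

    def renew_lease(host, port, swissnum, storage_index, renew_secret, cancel_secret):
        # ``storage_index`` is assumed to already be base32-encoded, per the
        # Encoding section above.  NOTE: a real client must validate the server
        # certificate by pinning the SPKI hash from the NURL rather than relying
        # on the default CA-based validation used by HTTPSConnection here.
        conn = HTTPSConnection(host, port)
        conn.putrequest("PUT", "/storage/v1/lease/{}".format(storage_index))
        conn.putheader("Authorization", "Tahoe-LAFS {}".format(swissnum))
        # The same header name is repeated, once per secret, as in the worked
        # examples later in this document.
        conn.putheader("X-Tahoe-Authorization", "lease-renew-secret {}".format(renew_secret))
        conn.putheader("X-Tahoe-Authorization", "lease-cancel-secret {}".format(cancel_secret))
        conn.putheader("Content-Length", "0")
        conn.endheaders()
        response = conn.getresponse()
        # 204 NO CONTENT on success; NOT FOUND in the no-shares cases noted above.
        return response.status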
@@ -441,8 +467,8 @@ Immutable Writing ~~~~~~~ -``POST /v1/immutable/:storage_index`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +``POST /storage/v1/immutable/:storage_index`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Initialize an immutable storage index with some buckets. The buckets may have share data written to them once. @@ -450,18 +476,34 @@ A lease is also created for the shares. Details of the buckets to create are encoded in the request body. For example:: - {"renew-secret": "efgh", "cancel-secret": "ijkl", - "share-numbers": [1, 7, ...], "allocated-size": 12345} + {"share-numbers": [1, 7, ...], "allocated-size": 12345} + +The request must include ``X-Tahoe-Authorization`` HTTP headers that set the various secrets—upload, lease renewal, lease cancellation—that will be later used to authorize various operations. +For example:: + + X-Tahoe-Authorization: lease-renew-secret + X-Tahoe-Authorization: lease-cancel-secret + X-Tahoe-Authorization: upload-secret The response body includes encoded information about the created buckets. For example:: {"already-have": [1, ...], "allocated": [7, ...]} +The upload secret is an opaque _byte_ string. + +Handling repeat calls: + +* If the same API call is repeated with the same upload secret, the response is the same and no change is made to server state. + This is necessary to ensure retries work in the face of lost responses from the server. +* If the API calls is with a different upload secret, this implies a new client, perhaps because the old client died. + Or it may happen because the client wants to upload a different share number than a previous client. + New shares will be created, existing shares will be unchanged, regardless of whether the upload secret matches or not. + Discussion `````````` -We considered making this ``POST /v1/immutable`` instead. +We considered making this ``POST /storage/v1/immutable`` instead. The motivation was to keep *storage index* out of the request URL. Request URLs have an elevated chance of being logged by something. We were concerned that having the *storage index* logged may increase some risks. @@ -482,13 +524,27 @@ The response includes ``already-have`` and ``allocated`` for two reasons: This might be because a server has become unavailable and a remaining server needs to store more shares for the upload. It could also just be that the client's preferred servers have changed. -``PATCH /v1/immutable/:storage_index/:share_number`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +Regarding upload secrets, +the goal is for uploading and aborting (see next sections) to be authenticated by more than just the storage index. +In the future, we may want to generate them in a way that allows resuming/canceling when the client has issues. +In the short term, they can just be a random byte string. +The primary security constraint is that each upload to each server has its own unique upload key, +tied to uploading that particular storage index to this particular server. + +Rejected designs for upload secrets: + +* Upload secret per share number. + In order to make the secret unguessable by attackers, which includes other servers, + it must contain randomness. + Randomness means there is no need to have a secret per share, since adding share-specific content to randomness doesn't actually make the secret any better. + +``PATCH /storage/v1/immutable/:storage_index/:share_number`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Write data for the indicated share. 
The share number must belong to the storage index. The request body is the raw share data (i.e., ``application/octet-stream``). -*Content-Range* requests are encouraged for large transfers to allow partially complete uploads to be resumed. +*Content-Range* requests are required; for large transfers this allows partially complete uploads to be resumed. For example, a 1MiB share can be divided in to eight separate 128KiB chunks. Each chunk can be uploaded in a separate request. @@ -498,6 +554,12 @@ If any one of these requests fails then at most 128KiB of upload work needs to b The server must recognize when all of the data has been received and mark the share as complete (which it can do because it was informed of the size when the storage index was initialized). +The request must include a ``X-Tahoe-Authorization`` header that includes the upload secret:: + + X-Tahoe-Authorization: upload-secret + +Responses: + * When a chunk that does not complete the share is successfully uploaded the response is ``OK``. The response body indicates the range of share data that has yet to be uploaded. That is:: @@ -517,20 +579,6 @@ The server must recognize when all of the data has been received and mark the sh the response is ``CONFLICT``. At this point the only thing to do is abort the upload and start from scratch (see below). -``PUT /v1/immutable/:storage_index/:share_number/abort`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -This cancels an *in-progress* upload. - -The response code: - -* When the upload is still in progress and therefore the abort has succeeded, - the response is ``OK``. - Future uploads can start from scratch with no pre-existing upload state stored on the server. -* If the uploaded has already finished, the response is 405 (Method Not Allowed) - and no change is made. - - Discussion `````````` @@ -549,12 +597,31 @@ From RFC 7231:: PATCH method defined in [RFC5789]). -``POST /v1/immutable/:storage_index/:share_number/corrupt`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -Advise the server the data read from the indicated share was corrupt. -The request body includes an human-meaningful string with details about the corruption. -It also includes potentially important details about the share. +``PUT /storage/v1/immutable/:storage_index/:share_number/abort`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +This cancels an *in-progress* upload. + +The request must include a ``X-Tahoe-Authorization`` header that includes the upload secret:: + + X-Tahoe-Authorization: upload-secret + +The response code: + +* When the upload is still in progress and therefore the abort has succeeded, + the response is ``OK``. + Future uploads can start from scratch with no pre-existing upload state stored on the server. +* If the uploaded has already finished, the response is 405 (Method Not Allowed) + and no change is made. + + +``POST /storage/v1/immutable/:storage_index/:share_number/corrupt`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +Advise the server the data read from the indicated share was corrupt. The +request body includes an human-meaningful text string with details about the +corruption. It also includes potentially important details about the share. For example:: @@ -562,25 +629,35 @@ For example:: .. share-type, storage-index, and share-number are inferred from the URL +The response code is OK (200) by default, or NOT FOUND (404) if the share +couldn't be found. 
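To illustrate the chunking and upload-secret rules above, here is a rough, non-normative sketch of a client uploading one share in ``Content-Range`` chunks. It assumes the bucket was already allocated with the ``POST`` described earlier, that the upload secret is serialized into the header as shown in the later worked examples, and (as in the previous sketch) that certificate validation via the NURL's SPKI hash is handled elsewhere::

    from http.client import HTTPSConnection

    def upload_share(host, port, swissnum, storage_index, share_number,
                     upload_secret, data, chunk_size=128 * 1024):
        conn = HTTPSConnection(host, port)
        path = "/storage/v1/immutable/{}/{}".format(storage_index, share_number)
        total = len(data)
        for start in range(0, total, chunk_size):
            chunk = data[start:start + chunk_size]
            end = start + len(chunk) - 1
            conn.request("PATCH", path, body=chunk, headers={
                "Authorization": "Tahoe-LAFS {}".format(swissnum),
                "X-Tahoe-Authorization": "upload-secret {}".format(upload_secret),
                "Content-Range": "bytes {}-{}/{}".format(start, end, total),
            })
            response = conn.getresponse()
            response.read()  # drain the body so the connection can be reused
            if response.status == 201:
                return True   # CREATED: every byte of the share has been received
            if response.status != 200:
                # e.g. CONFLICT: abort via PUT .../abort with the same upload
                # secret and start this share's upload over from scratch.
                return False
        return False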
+ Reading ~~~~~~~ -``GET /v1/immutable/:storage_index/shares`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +``GET /storage/v1/immutable/:storage_index/shares`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -Retrieve a list indicating all shares available for the indicated storage index. -For example:: +Retrieve a list (semantically, a set) indicating all shares available for the +indicated storage index. For example:: [1, 5] -``GET /v1/immutable/:storage_index/:share_number`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +An unknown storage index results in an empty list. + +``GET /storage/v1/immutable/:storage_index/:share_number`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Read a contiguous sequence of bytes from one share in one bucket. The response body is the raw share data (i.e., ``application/octet-stream``). -The ``Range`` header may be used to request exactly one ``bytes`` range. +The ``Range`` header may be used to request exactly one ``bytes`` range, in which case the response code will be 206 (partial content). Interpretation and response behavior is as specified in RFC 7233 § 4.1. -Multiple ranges in a single request are *not* supported. +Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported. + +If the response reads beyond the end of the data, the response may be shorter than the requested range. +The resulting ``Content-Range`` header will be consistent with the returned data. + +If the response to a query is an empty range, the ``NO CONTENT`` (204) response code will be used. Discussion `````````` @@ -609,8 +686,8 @@ Mutable Writing ~~~~~~~ -``POST /v1/mutable/:storage_index/read-test-write`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +``POST /storage/v1/mutable/:storage_index/read-test-write`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! General purpose read-test-and-write operation for mutable storage indexes. A mutable storage index is also called a "slot" @@ -619,16 +696,16 @@ The first write operation on a mutable storage index creates it (that is, there is no separate "create this storage index" operation as there is for the immutable storage index type). -The request body includes the secrets necessary to rewrite to the shares -along with test, read, and write vectors for the operation. +The request must include ``X-Tahoe-Authorization`` headers with write enabler and lease secrets:: + + X-Tahoe-Authorization: write-enabler + X-Tahoe-Authorization: lease-cancel-secret + X-Tahoe-Authorization: lease-renew-secret + +The request body includes test, read, and write vectors for the operation. For example:: { - "secrets": { - "write-enabler": "abcd", - "lease-renew": "efgh", - "lease-cancel": "ijkl" - }, "test-write-vectors": { 0: { "test": [{ @@ -665,22 +742,31 @@ As a result, if there is no data at all, an empty bytestring is returned no matt Reading ~~~~~~~ -``GET /v1/mutable/:storage_index/shares`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +``GET /storage/v1/mutable/:storage_index/shares`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -Retrieve a list indicating all shares available for the indicated storage index. -For example:: +Retrieve a set indicating all shares available for the indicated storage index. 
+For example (this is shown as list, since it will be list for JSON, but will be set for CBOR):: [1, 5] -``GET /v1/mutable/:storage_index?share=:s0&share=:sN&offset=:o1&size=:z0&offset=:oN&size=:zN`` +``GET /storage/v1/mutable/:storage_index/:share_number`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -Read data from the indicated mutable shares. -Just like ``GET /v1/mutable/:storage_index``. +Read data from the indicated mutable shares, just like ``GET /storage/v1/immutable/:storage_index`` -``POST /v1/mutable/:storage_index/:share_number/corrupt`` -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +The ``Range`` header may be used to request exactly one ``bytes`` range, in which case the response code will be 206 (partial content). +Interpretation and response behavior is as specified in RFC 7233 § 4.1. +Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported. + +If the response reads beyond the end of the data, the response may be shorter than the requested range. +The resulting ``Content-Range`` header will be consistent with the returned data. + +If the response to a query is an empty range, the ``NO CONTENT`` (204) response code will be used. + + +``POST /storage/v1/mutable/:storage_index/:share_number/corrupt`` +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Advise the server the data read from the indicated share was corrupt. Just like the immutable version. @@ -693,44 +779,61 @@ Immutable Data 1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded:: - POST /v1/immutable/AAAAAAAAAAAAAAAA - {"renew-secret": "efgh", "cancel-secret": "ijkl", - "share-numbers": [1, 7], "allocated-size": 48} + POST /storage/v1/immutable/AAAAAAAAAAAAAAAA + Authorization: Tahoe-LAFS nurl-swissnum + X-Tahoe-Authorization: lease-renew-secret efgh + X-Tahoe-Authorization: lease-cancel-secret jjkl + X-Tahoe-Authorization: upload-secret xyzf + + {"share-numbers": [1, 7], "allocated-size": 48} 200 OK {"already-have": [1], "allocated": [7]} #. Upload the content for immutable share ``7``:: - PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7 + PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 + Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 0-15/48 + X-Tahoe-Authorization: upload-secret xyzf 200 OK + { "required": [ {"begin": 16, "end": 48 } ] } - PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7 + PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 + Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 16-31/48 + X-Tahoe-Authorization: upload-secret xyzf 200 OK + { "required": [ {"begin": 32, "end": 48 } ] } - PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7 + PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 + Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 32-47/48 + X-Tahoe-Authorization: upload-secret xyzf 201 CREATED #. Download the content of the previously uploaded immutable share ``7``:: - GET /v1/immutable/AAAAAAAAAAAAAAAA?share=7&offset=0&size=48 + GET /storage/v1/immutable/AAAAAAAAAAAAAAAA?share=7 + Authorization: Tahoe-LAFS nurl-swissnum + Range: bytes=0-47 200 OK + Content-Range: bytes 0-47/48 #. 
Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``:: - PUT /v1/lease/AAAAAAAAAAAAAAAA - {"renew-secret": "efgh", "cancel-secret": "ijkl"} + PUT /storage/v1/lease/AAAAAAAAAAAAAAAA + Authorization: Tahoe-LAFS nurl-swissnum + X-Tahoe-Authorization: lease-cancel-secret jjkl + X-Tahoe-Authorization: lease-renew-secret efgh 204 NO CONTENT @@ -742,13 +845,13 @@ The special test vector of size 1 but empty bytes will only pass if there is no existing share, otherwise it will read a byte which won't match `b""`:: - POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write + POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write + Authorization: Tahoe-LAFS nurl-swissnum + X-Tahoe-Authorization: write-enabler abcd + X-Tahoe-Authorization: lease-cancel-secret efgh + X-Tahoe-Authorization: lease-renew-secret ijkl + { - "secrets": { - "write-enabler": "abcd", - "lease-renew": "efgh", - "lease-cancel": "ijkl" - }, "test-write-vectors": { 3: { "test": [{ @@ -774,13 +877,13 @@ otherwise it will read a byte which won't match `b""`:: #. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail):: - POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write + POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write + Authorization: Tahoe-LAFS nurl-swissnum + X-Tahoe-Authorization: write-enabler abcd + X-Tahoe-Authorization: lease-cancel-secret efgh + X-Tahoe-Authorization: lease-renew-secret ijkl + { - "secrets": { - "write-enabler": "abcd", - "lease-renew": "efgh", - "lease-cancel": "ijkl" - }, "test-write-vectors": { 3: { "test": [{ @@ -806,13 +909,20 @@ otherwise it will read a byte which won't match `b""`:: #. Download the contents of share number ``3``:: - GET /v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10 + GET /storage/v1/mutable/BBBBBBBBBBBBBBBB?share=3 + Authorization: Tahoe-LAFS nurl-swissnum + Range: bytes=0-16 + + 200 OK + Content-Range: bytes 0-15/16 #. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``:: - PUT /v1/lease/BBBBBBBBBBBBBBBB - {"renew-secret": "efgh", "cancel-secret": "ijkl"} + PUT /storage/v1/lease/BBBBBBBBBBBBBBBB + Authorization: Tahoe-LAFS nurl-swissnum + X-Tahoe-Authorization: lease-cancel-secret efgh + X-Tahoe-Authorization: lease-renew-secret ijkl 204 NO CONTENT diff --git a/docs/release-checklist.rst b/docs/release-checklist.rst index da1bbe16f..aa5531b59 100644 --- a/docs/release-checklist.rst +++ b/docs/release-checklist.rst @@ -3,9 +3,8 @@ Release Checklist ================= -These instructions were produced while making the 1.15.0 release. They -are based on the original instructions (in old revisions in the file -`docs/how_to_make_a_tahoe-lafs_release.org`). +This release checklist specifies a series of checks that anyone engaged in +releasing a version of Tahoe should follow. Any contributor can do the first part of the release preparation. Only certain contributors can perform other parts. These are the two main @@ -13,9 +12,12 @@ sections of this checklist (and could be done by different people). A final section describes how to announce the release. +This checklist is based on the original instructions (in old revisions in the file +`docs/how_to_make_a_tahoe-lafs_release.org`). + Any Contributor ---------------- +=============== Anyone who can create normal PRs should be able to complete this portion of the release process. @@ -32,13 +34,35 @@ Tuesday if you want to get anything in"). - Create a ticket for the release in Trac - Ticket number needed in next section +- Making first release? 
See `GPG Setup Instructions `__ to make sure you can sign releases. [One time setup] + +Get a clean checkout +```````````````````` + +The release proccess involves compressing source files and putting them in formats +suitable for distribution such as ``.tar.gz`` and ``zip``. That said, it's neccesary to +the release process begins with a clean checkout to avoid making a release with +previously generated files. + +- Inside the tahoe root dir run ``git clone . ../tahoe-release-x.x.x`` where (x.x.x is the release number such as 1.16.0). + +.. note:: + The above command would create a new directory at the same level as your original clone named ``tahoe-release-x.x.x``. You can name this folder however you want but it would be a good + practice to give it the release name. You MAY also discard this directory once the release + process is complete. + +Get into the release directory and install dependencies by running + +- cd ../tahoe-release-x.x.x (assuming you are still in your original clone) +- python -m venv venv +- ./venv/bin/pip install --editable .[test] Create Branch and Apply Updates ``````````````````````````````` -- Create a branch for release-candidates (e.g. `XXXX.release-1.15.0.rc0`) -- run `tox -e news` to produce a new NEWS.txt file (this does a commit) +- Create a branch for the release/candidate (e.g. ``XXXX.release-1.16.0``) +- run tox -e news to produce a new NEWS.txt file (this does a commit) - create the news for the release - newsfragments/.minor @@ -46,7 +70,7 @@ Create Branch and Apply Updates - manually fix NEWS.txt - - proper title for latest release ("Release 1.15.0" instead of "Release ...post1432") + - proper title for latest release ("Release 1.16.0" instead of "Release ...post1432") - double-check date (maybe release will be in the future) - spot-check the release notes (these come from the newsfragments files though so don't do heavy editing) @@ -54,7 +78,7 @@ Create Branch and Apply Updates - update "relnotes.txt" - - update all mentions of 1.14.0 -> 1.15.0 + - update all mentions of ``1.16.0`` to new and higher release version for example ``1.16.1`` - update "previous release" statement and date - summarize major changes - commit it @@ -63,14 +87,7 @@ Create Branch and Apply Updates - change the value given for `version` from `OLD.post1` to `NEW.post1` -- update "CREDITS" - - - are there any new contributors in this release? - - one way: git log release-1.14.0.. | grep Author | sort | uniq - - commit it - - update "docs/known_issues.rst" if appropriate -- update "docs/Installation/install-tahoe.rst" references to the new release - Push the branch to github - Create a (draft) PR; this should trigger CI (note that github doesn't let you create a PR without some changes on the branch so @@ -95,23 +112,33 @@ they will need to evaluate which contributors' signatures they trust. - (all steps above are completed) - sign the release - - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.15.0rc0" tahoe-lafs-1.15.0rc0 - - (replace the key-id above with your own) + - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.16.0rc0" tahoe-lafs-1.16.0rc0 + +.. note:: + - Replace the key-id above with your own, which can simply be your email if it's attached to your fingerprint. + - Don't forget to put the correct tag message and name. 
In this example, the tag message is "release Tahoe-LAFS-1.16.0rc0" and the tag name is ``tahoe-lafs-1.16.0rc0`` - build all code locally + - these should all pass: - - tox -e py27,codechecks,docs,integration + - tox -e py37,codechecks,docs,integration - these can fail (ideally they should not of course): - tox -e deprecations,upcoming-deprecations +- clone to a clean, local checkout (to avoid extra files being included in the release) + + - cd /tmp + - git clone /home/meejah/src/tahoe-lafs + - build tarballs - tox -e tarballs - - confirm it at least exists: - - ls dist/ | grep 1.15.0rc0 + - Confirm that release tarballs exist by runnig: + + - ls dist/ | grep 1.16.0rc0 - inspect and test the tarballs @@ -120,14 +147,12 @@ they will need to evaluate which contributors' signatures they trust. - when satisfied, sign the tarballs: - - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl - - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.bz2 - - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.gz - - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.zip + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.16.0rc0-py2.py3-none-any.whl + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.16.0rc0.tar.gz Privileged Contributor ------------------------ +====================== Steps in this portion require special access to keys or infrastructure. For example, **access to tahoe-lafs.org** to upload @@ -155,14 +180,20 @@ need to be uploaded to https://tahoe-lafs.org in `~source/downloads` - secure-copy all release artifacts to the download area on the tahoe-lafs.org host machine. `~source/downloads` on there maps to - https://tahoe-lafs.org/downloads/ on the Web. -- scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads + https://tahoe-lafs.org/downloads/ on the Web: + + - scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads + - the following developers have access to do this: - exarkun - meejah - warner +Push the signed tag to the main repository: + +- git push origin tahoe-lafs-1.17.1 + For the actual release, the tarball and signature files need to be uploaded to PyPI as well. diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..39c4c20f0 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,4 @@ +sphinx +docutils<0.18 # https://github.com/sphinx-doc/sphinx/issues/9788 +recommonmark +sphinx_rtd_theme diff --git a/docs/running.rst b/docs/running.rst index 406c8200b..263448735 100644 --- a/docs/running.rst +++ b/docs/running.rst @@ -124,6 +124,35 @@ Tahoe-LAFS. .. _magic wormhole: https://magic-wormhole.io/ +Multiple Instances +------------------ + +Running multiple instances against the same configuration directory isn't supported. +This will lead to undefined behavior and could corrupt the configuration or state. + +We attempt to avoid this situation with a "pidfile"-style file in the config directory called ``running.process``. +There may be a parallel file called ``running.process.lock`` in existence. + +The ``.lock`` file exists to make sure only one process modifies ``running.process`` at once. +The lock file is managed by the `lockfile `_ library. +If you wish to make use of ``running.process`` for any reason you should also lock it and follow the semantics of lockfile. + +If ``running.process`` exists then it contains the PID and the creation-time of the process. 
+When no such file exists, there is no other process running on this configuration. +If there is a ``running.process`` file, it may be a leftover file or it may indicate that another process is running against this config. +To tell the difference, determine if the PID in the file exists currently. +If it does, check the creation-time of the process versus the one in the file. +If these match, there is another process currently running and using this config. +Otherwise, the file is stale -- it should be removed before starting Tahoe-LAFS. + +Some example Python code to check the above situations: + +.. literalinclude:: check_running.py + + + + + A note about small grids ------------------------ diff --git a/docs/specifications/url.rst b/docs/specifications/url.rst index 31fb05fad..12e2b8642 100644 --- a/docs/specifications/url.rst +++ b/docs/specifications/url.rst @@ -7,6 +7,8 @@ These are not to be confused with the URI-like capabilities Tahoe-LAFS uses to r An attempt is also made to outline the rationale for certain choices about these URLs. The intended audience for this document is Tahoe-LAFS maintainers and other developers interested in interoperating with Tahoe-LAFS or these URLs. +.. _furls: + Background ---------- @@ -31,6 +33,8 @@ The client's use of the swissnum is what allows the server to authorize the clie .. _`swiss number`: http://wiki.erights.org/wiki/Swiss_number +.. _NURLs: + NURLs ----- @@ -47,27 +51,27 @@ This can be considered to expand to "**N**\ ew URLs" or "Authe\ **N**\ ticating The anticipated use for a **NURL** will still be to establish a TLS connection to a peer. The protocol run over that TLS connection could be Foolscap though it is more likely to be an HTTP-based protocol (such as GBS). +Unlike fURLs, only a single net-loc is included, for consistency with other forms of URLs. +As a result, multiple NURLs may be available for a single server. + Syntax ------ The EBNF for a NURL is as follows:: - nurl = scheme, hash, "@", net-loc-list, "/", swiss-number, [ version1 ] - - scheme = "pb://" + nurl = tcp-nurl | tor-nurl | i2p-nurl + tcp-nurl = "pb://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ] + tor-nurl = "pb+tor://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ] + i2p-nurl = "pb+i2p://", hash, "@", i2p-loc, "/", swiss-number, [ version1 ] hash = unreserved - net-loc-list = net-loc, [ { ",", net-loc } ] - net-loc = tcp-loc | tor-loc | i2p-loc - - tcp-loc = [ "tcp:" ], hostname, [ ":" port ] - tor-loc = "tor:", hostname, [ ":" port ] - i2p-loc = "i2p:", i2p-addr, [ ":" port ] - - i2p-addr = { unreserved }, ".i2p" + tcp-loc = hostname, [ ":" port ] hostname = domain | IPv4address | IPv6address + i2p-loc = i2p-addr, [ ":" port ] + i2p-addr = { unreserved }, ".i2p" + swiss-number = segment version1 = "#v=1" @@ -87,11 +91,13 @@ These differences are separated into distinct versions. Version 0 --------- -A Foolscap fURL is considered the canonical definition of a version 0 NURL. +In theory, a Foolscap fURL with a single netloc is considered the canonical definition of a version 0 NURL. Notably, the hash component is defined as the base32-encoded SHA1 hash of the DER form of an x509v3 certificate. A version 0 NURL is identified by the absence of the ``v=1`` fragment. +In practice, real world fURLs may have more than one netloc, so lack of version fragment will likely just involve dispatching the fURL to a different parser. 
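To illustrate the dispatch just described, here is a small, non-normative sketch using only the standard library. It handles only the ``tcp-nurl`` form of the grammar above and is one way a client might split the pieces apart, not a definition::

    from urllib.parse import urlparse

    def parse_nurl_or_furl(text):
        """
        Split a ``pb://`` string into (hash, hostname, port, swissnum) if it is
        a version 1 tcp NURL; anything without the ``v=1`` fragment is treated
        as a fURL (or version 0 NURL) and left to a Foolscap-style parser.
        """
        parts = urlparse(text)
        if parts.fragment != "v=1":
            return None  # dispatch to a fURL parser instead, as described above
        if parts.scheme != "pb":
            raise ValueError("tor/i2p NURLs are out of scope for this sketch")
        spki_hash = parts.username       # unpadded urlsafe-base64 of the SPKI hash
        swissnum = parts.path.lstrip("/")
        return spki_hash, parts.hostname, parts.port, swissnum

    # Using the version 1 example given below:
    # parse_nurl_or_furl(
    #     "pb://azEu8vlRpnEeYm0DySQDeNY3Z2iJXHC_bsbaAw@localhost:47877/64i4aokv4ej#v=1")
    # == ("azEu8vlRpnEeYm0DySQDeNY3Z2iJXHC_bsbaAw", "localhost", 47877, "64i4aokv4ej")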
+ Examples ~~~~~~~~ @@ -103,11 +109,8 @@ Version 1 The hash component of a version 1 NURL differs in three ways from the prior version. -1. The hash function used is SHA3-224 instead of SHA1. - The security of SHA1 `continues to be eroded`_. - Contrariwise SHA3 is currently the most recent addition to the SHA family by NIST. - The 224 bit instance is chosen to keep the output short and because it offers greater collision resistance than SHA1 was thought to offer even at its inception - (prior to security research showing actual collision resistance is lower). +1. The hash function used is SHA-256, to match RFC 7469. + The security of SHA1 `continues to be eroded`_; Latacora `SHA-2`_. 2. The hash is computed over the certificate's SPKI instead of the whole certificate. This allows certificate re-generation so long as the public key remains the same. This is useful to allow contact information to be updated or extension of validity period. @@ -122,7 +125,7 @@ The hash component of a version 1 NURL differs in three ways from the prior vers *all* certificate fields should be considered within the context of the relationship identified by the SPKI hash. 3. The hash is encoded using urlsafe-base64 (without padding) instead of base32. - This provides a more compact representation and minimizes the usability impacts of switching from a 160 bit hash to a 224 bit hash. + This provides a more compact representation and minimizes the usability impacts of switching from a 160 bit hash to a 256 bit hash. A version 1 NURL is identified by the presence of the ``v=1`` fragment. Though the length of the hash string (38 bytes) could also be used to differentiate it from a version 0 NURL, @@ -140,7 +143,8 @@ Examples * ``pb://azEu8vlRpnEeYm0DySQDeNY3Z2iJXHC_bsbaAw@localhost:47877/64i4aokv4ej#v=1`` .. _`continues to be eroded`: https://en.wikipedia.org/wiki/SHA-1#Cryptanalysis_and_validation -.. _`explored by the web community`: https://www.imperialviolet.org/2011/05/04/pinning.html +.. _`SHA-2`: https://latacora.micro.blog/2018/04/03/cryptographic-right-answers.html +.. _`explored by the web community`: https://www.rfc-editor.org/rfc/rfc7469 .. _Foolscap: https://github.com/warner/foolscap .. [1] ``foolscap.furl.decode_furl`` is taken as the canonical definition of the syntax of a fURL. diff --git a/docs/stats.rst b/docs/stats.rst index 50642d816..c7d69e0d2 100644 --- a/docs/stats.rst +++ b/docs/stats.rst @@ -264,3 +264,18 @@ the "tahoe-conf" file for notes about configuration and installing these plugins into a Munin environment. .. _Munin: http://munin-monitoring.org/ + + +Scraping Stats Values in OpenMetrics Format +=========================================== + +Time Series DataBase (TSDB) software like Prometheus_ and VictoriaMetrics_ can +parse statistics from the e.g. http://localhost:3456/statistics?t=openmetrics +URL in OpenMetrics_ format. Software like Grafana_ can then be used to graph +and alert on these numbers. You can find a pre-configured dashboard for +Grafana at https://grafana.com/grafana/dashboards/16894-tahoe-lafs/. + +.. _OpenMetrics: https://openmetrics.io/ +.. _Prometheus: https://prometheus.io/ +.. _VictoriaMetrics: https://victoriametrics.com/ +.. 
_Grafana: https://grafana.com/ diff --git a/integration/conftest.py b/integration/conftest.py index 39ff3b42b..e284b5cba 100644 --- a/integration/conftest.py +++ b/integration/conftest.py @@ -353,10 +353,23 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer, nodes.append(process) return nodes +@pytest.fixture(scope="session") +def alice_sftp_client_key_path(temp_dir): + # The client SSH key path is typically going to be somewhere else (~/.ssh, + # typically), but for convenience sake for testing we'll put it inside node. + return join(temp_dir, "alice", "private", "ssh_client_rsa_key") @pytest.fixture(scope='session') @log_call(action_type=u"integration:alice", include_args=[], include_result=False) -def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request): +def alice( + reactor, + temp_dir, + introducer_furl, + flog_gatherer, + storage_nodes, + alice_sftp_client_key_path, + request, +): process = pytest_twisted.blockon( _create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice", @@ -387,19 +400,13 @@ accounts.file = {accounts_path} """.format(ssh_key_path=host_ssh_key_path, accounts_path=accounts_path)) generate_ssh_key(host_ssh_key_path) - # 3. Add a SFTP access file with username/password and SSH key auth. - - # The client SSH key path is typically going to be somewhere else (~/.ssh, - # typically), but for convenience sake for testing we'll put it inside node. - client_ssh_key_path = join(process.node_dir, "private", "ssh_client_rsa_key") - generate_ssh_key(client_ssh_key_path) + # 3. Add a SFTP access file with an SSH key for auth. + generate_ssh_key(alice_sftp_client_key_path) # Pub key format is "ssh-rsa ". We want the key. - ssh_public_key = open(client_ssh_key_path + ".pub").read().strip().split()[1] + ssh_public_key = open(alice_sftp_client_key_path + ".pub").read().strip().split()[1] with open(accounts_path, "w") as f: f.write("""\ -alice password {rwcap} - -alice2 ssh-rsa {ssh_public_key} {rwcap} +alice-key ssh-rsa {ssh_public_key} {rwcap} """.format(rwcap=rwcap, ssh_public_key=ssh_public_key)) # 4. Restart the node with new SFTP config. @@ -455,10 +462,8 @@ def chutney(reactor, temp_dir): ) pytest_twisted.blockon(proto.done) - # XXX: Here we reset Chutney to the last revision known to work - # with Python 2, as a workaround for Chutney moving to Python 3. - # When this is no longer necessary, we will have to drop this and - # add '--depth=1' back to the above 'git clone' subprocess. + # XXX: Here we reset Chutney to a specific revision known to work, + # since there are no stability guarantees or releases yet. 
proto = _DumpOutputProtocol(None) reactor.spawnProcess( proto, @@ -466,7 +471,7 @@ def chutney(reactor, temp_dir): ( 'git', '-C', chutney_dir, 'reset', '--hard', - '99bd06c7554b9113af8c0877b6eca4ceb95dcbaa' + 'c825cba0bcd813c644c6ac069deeb7347d3200ee' ), env=environ, ) diff --git a/integration/install-tor.sh b/integration/install-tor.sh deleted file mode 100755 index 66fa64cb1..000000000 --- a/integration/install-tor.sh +++ /dev/null @@ -1,794 +0,0 @@ -#!/bin/bash - -# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ -set -euxo pipefail - -CODENAME=$(lsb_release --short --codename) - -if [ "$(id -u)" != "0" ]; then - SUDO="sudo" -else - SUDO="" -fi - -# Script to install Tor -echo "deb http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list -echo "deb-src http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list - -# # Install Tor repo signing key -${SUDO} apt-key add - </statistics/?t=json/` - - there is at least one storage-server connected + - there is at least one storage-server connected (configurable via + ``minimum_number_of_servers``) - every storage-server has a "last_received_data" and it is within the last `liveness` seconds @@ -506,8 +507,8 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2): time.sleep(1) continue - if len(js['servers']) == 0: - print("waiting because no servers at all") + if len(js['servers']) < minimum_number_of_servers: + print("waiting because insufficient servers") time.sleep(1) continue server_times = [ diff --git a/misc/build_helpers/run-deprecations.py b/misc/build_helpers/run-deprecations.py index f99cf90aa..2ad335bd1 100644 --- a/misc/build_helpers/run-deprecations.py +++ b/misc/build_helpers/run-deprecations.py @@ -26,10 +26,10 @@ python run-deprecations.py [--warnings=STDERRFILE] [--package=PYTHONPACKAGE ] CO class RunPP(protocol.ProcessProtocol): def outReceived(self, data): self.stdout.write(data) - sys.stdout.write(data) + sys.stdout.write(str(data, sys.stdout.encoding)) def errReceived(self, data): self.stderr.write(data) - sys.stderr.write(data) + sys.stderr.write(str(data, sys.stdout.encoding)) def processEnded(self, reason): signal = reason.value.signal rc = reason.value.exitCode @@ -100,17 +100,19 @@ def run_command(main): pp.stdout.seek(0) for line in pp.stdout.readlines(): + line = str(line, sys.stdout.encoding) if match(line): add(line) # includes newline pp.stderr.seek(0) for line in pp.stderr.readlines(): + line = str(line, sys.stdout.encoding) if match(line): add(line) if warnings: if config["warnings"]: - with open(config["warnings"], "wb") as f: + with open(config["warnings"], "w") as f: print("".join(warnings), file=f) print("ERROR: %d deprecation warnings found" % len(warnings)) sys.exit(1) diff --git a/misc/build_helpers/update-version.py b/misc/build_helpers/update-version.py new file mode 100644 index 000000000..75b22edae --- /dev/null +++ b/misc/build_helpers/update-version.py @@ -0,0 +1,95 @@ +# +# this updates the (tagged) version of the software +# +# Any "options" are hard-coded in here (e.g. 
the GnuPG key to use) +# + +author = "meejah " + + +import sys +import time +from datetime import datetime +from packaging.version import Version + +from dulwich.repo import Repo +from dulwich.porcelain import ( + tag_list, + tag_create, + status, +) + +from twisted.internet.task import ( + react, +) +from twisted.internet.defer import ( + ensureDeferred, +) + + +def existing_tags(git): + versions = sorted( + Version(v.decode("utf8").lstrip("tahoe-lafs-")) + for v in tag_list(git) + if v.startswith(b"tahoe-lafs-") + ) + return versions + + +def create_new_version(git): + versions = existing_tags(git) + biggest = versions[-1] + + return Version( + "{}.{}.{}".format( + biggest.major, + biggest.minor + 1, + 0, + ) + ) + + +async def main(reactor): + git = Repo(".") + + st = status(git) + if any(st.staged.values()) or st.unstaged: + print("unclean checkout; aborting") + raise SystemExit(1) + + v = create_new_version(git) + if "--no-tag" in sys.argv: + print(v) + return + + print("Existing tags: {}".format("\n".join(str(x) for x in existing_tags(git)))) + print("New tag will be {}".format(v)) + + # the "tag time" is seconds from the epoch .. we quantize these to + # the start of the day in question, in UTC. + now = datetime.now() + s = now.utctimetuple() + ts = int( + time.mktime( + time.struct_time((s.tm_year, s.tm_mon, s.tm_mday, 0, 0, 0, 0, s.tm_yday, 0)) + ) + ) + tag_create( + repo=git, + tag="tahoe-lafs-{}".format(str(v)).encode("utf8"), + author=author.encode("utf8"), + message="Release {}".format(v).encode("utf8"), + annotated=True, + objectish=b"HEAD", + sign=author.encode("utf8"), + tag_time=ts, + tag_timezone=0, + ) + + print("Tag created locally, it is not pushed") + print("To push it run something like:") + print(" git push origin {}".format(v)) + + +if __name__ == "__main__": + react(lambda r: ensureDeferred(main(r))) diff --git a/misc/checkers/check_memory.py b/misc/checkers/check_memory.py deleted file mode 100644 index 268d77451..000000000 --- a/misc/checkers/check_memory.py +++ /dev/null @@ -1,522 +0,0 @@ -from __future__ import print_function - -import os, shutil, sys, urllib, time, stat, urlparse - -# Python 2 compatibility -from future.utils import PY2 -if PY2: - from future.builtins import str # noqa: F401 -from six.moves import cStringIO as StringIO - -from twisted.python.filepath import ( - FilePath, -) -from twisted.internet import defer, reactor, protocol, error -from twisted.application import service, internet -from twisted.web import client as tw_client -from twisted.python import log, procutils -from foolscap.api import Tub, fireEventually, flushEventualQueue - -from allmydata import client, introducer -from allmydata.immutable import upload -from allmydata.scripts import create_node -from allmydata.util import fileutil, pollmixin -from allmydata.util.fileutil import abspath_expanduser_unicode -from allmydata.util.encodingutil import get_filesystem_encoding - -from allmydata.scripts.common import ( - write_introducer, -) - -class StallableHTTPGetterDiscarder(tw_client.HTTPPageGetter, object): - full_speed_ahead = False - _bytes_so_far = 0 - stalled = None - def handleResponsePart(self, data): - self._bytes_so_far += len(data) - if not self.factory.do_stall: - return - if self.full_speed_ahead: - return - if self._bytes_so_far > 1e6+100: - if not self.stalled: - print("STALLING") - self.transport.pauseProducing() - self.stalled = reactor.callLater(10.0, self._resume_speed) - def _resume_speed(self): - print("RESUME SPEED") - self.stalled = None - 
self.full_speed_ahead = True - self.transport.resumeProducing() - def handleResponseEnd(self): - if self.stalled: - print("CANCEL") - self.stalled.cancel() - self.stalled = None - return tw_client.HTTPPageGetter.handleResponseEnd(self) - -class StallableDiscardingHTTPClientFactory(tw_client.HTTPClientFactory, object): - protocol = StallableHTTPGetterDiscarder - -def discardPage(url, stall=False, *args, **kwargs): - """Start fetching the URL, but stall our pipe after the first 1MB. - Wait 10 seconds, then resume downloading (and discarding) everything. - """ - # adapted from twisted.web.client.getPage . We can't just wrap or - # subclass because it provides no way to override the HTTPClientFactory - # that it creates. - scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) - assert scheme == 'http' - host, port = netloc, 80 - if ":" in host: - host, port = host.split(":") - port = int(port) - factory = StallableDiscardingHTTPClientFactory(url, *args, **kwargs) - factory.do_stall = stall - reactor.connectTCP(host, port, factory) - return factory.deferred - -class ChildDidNotStartError(Exception): - pass - -class SystemFramework(pollmixin.PollMixin): - numnodes = 7 - - def __init__(self, basedir, mode): - self.basedir = basedir = abspath_expanduser_unicode(str(basedir)) - if not (basedir + os.path.sep).startswith(abspath_expanduser_unicode(u".") + os.path.sep): - raise AssertionError("safety issue: basedir must be a subdir") - self.testdir = testdir = os.path.join(basedir, "test") - if os.path.exists(testdir): - shutil.rmtree(testdir) - fileutil.make_dirs(testdir) - self.sparent = service.MultiService() - self.sparent.startService() - self.proc = None - self.tub = Tub() - self.tub.setOption("expose-remote-exception-types", False) - self.tub.setServiceParent(self.sparent) - self.mode = mode - self.failed = False - self.keepalive_file = None - - def run(self): - framelog = os.path.join(self.basedir, "driver.log") - log.startLogging(open(framelog, "a"), setStdout=False) - log.msg("CHECK_MEMORY(mode=%s) STARTING" % self.mode) - #logfile = open(os.path.join(self.testdir, "log"), "w") - #flo = log.FileLogObserver(logfile) - #log.startLoggingWithObserver(flo.emit, setStdout=False) - d = fireEventually() - d.addCallback(lambda res: self.setUp()) - d.addCallback(lambda res: self.record_initial_memusage()) - d.addCallback(lambda res: self.make_nodes()) - d.addCallback(lambda res: self.wait_for_client_connected()) - d.addCallback(lambda res: self.do_test()) - d.addBoth(self.tearDown) - def _err(err): - self.failed = err - log.err(err) - print(err) - d.addErrback(_err) - def _done(res): - reactor.stop() - return res - d.addBoth(_done) - reactor.run() - if self.failed: - # raiseException doesn't work for CopiedFailures - self.failed.raiseException() - - def setUp(self): - #print("STARTING") - self.stats = {} - self.statsfile = open(os.path.join(self.basedir, "stats.out"), "a") - self.make_introducer() - d = self.start_client() - def _record_control_furl(control_furl): - self.control_furl = control_furl - #print("OBTAINING '%s'" % (control_furl,)) - return self.tub.getReference(self.control_furl) - d.addCallback(_record_control_furl) - def _record_control(control_rref): - self.control_rref = control_rref - d.addCallback(_record_control) - def _ready(res): - #print("CLIENT READY") - pass - d.addCallback(_ready) - return d - - def record_initial_memusage(self): - print() - print("Client started (no connections yet)") - d = self._print_usage() - d.addCallback(self.stash_stats, "init") - 
return d - - def wait_for_client_connected(self): - print() - print("Client connecting to other nodes..") - return self.control_rref.callRemote("wait_for_client_connections", - self.numnodes+1) - - def tearDown(self, passthrough): - # the client node will shut down in a few seconds - #os.remove(os.path.join(self.clientdir, client.Client.EXIT_TRIGGER_FILE)) - log.msg("shutting down SystemTest services") - if self.keepalive_file and os.path.exists(self.keepalive_file): - age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME] - log.msg("keepalive file at shutdown was %ds old" % age) - d = defer.succeed(None) - if self.proc: - d.addCallback(lambda res: self.kill_client()) - d.addCallback(lambda res: self.sparent.stopService()) - d.addCallback(lambda res: flushEventualQueue()) - def _close_statsfile(res): - self.statsfile.close() - d.addCallback(_close_statsfile) - d.addCallback(lambda res: passthrough) - return d - - def make_introducer(self): - iv_basedir = os.path.join(self.testdir, "introducer") - os.mkdir(iv_basedir) - self.introducer = introducer.IntroducerNode(basedir=iv_basedir) - self.introducer.setServiceParent(self) - self.introducer_furl = self.introducer.introducer_url - - def make_nodes(self): - root = FilePath(self.testdir) - self.nodes = [] - for i in range(self.numnodes): - nodedir = root.child("node%d" % (i,)) - private = nodedir.child("private") - private.makedirs() - write_introducer(nodedir, "default", self.introducer_url) - config = ( - "[client]\n" - "shares.happy = 1\n" - "[storage]\n" - ) - # the only tests for which we want the internal nodes to actually - # retain shares are the ones where somebody's going to download - # them. - if self.mode in ("download", "download-GET", "download-GET-slow"): - # retain shares - pass - else: - # for these tests, we tell the storage servers to pretend to - # accept shares, but really just throw them out, since we're - # only testing upload and not download. - config += "debug_discard = true\n" - if self.mode in ("receive",): - # for this mode, the client-under-test gets all the shares, - # so our internal nodes can refuse requests - config += "readonly = true\n" - nodedir.child("tahoe.cfg").setContent(config) - c = client.Client(basedir=nodedir.path) - c.setServiceParent(self) - self.nodes.append(c) - # the peers will start running, eventually they will connect to each - # other and the introducer - - def touch_keepalive(self): - if os.path.exists(self.keepalive_file): - age = time.time() - os.stat(self.keepalive_file)[stat.ST_MTIME] - log.msg("touching keepalive file, was %ds old" % age) - f = open(self.keepalive_file, "w") - f.write("""\ -If the node notices this file at startup, it will poll every 5 seconds and -terminate if the file is more than 10 seconds old, or if it has been deleted. -If the test harness has an internal failure and neglects to kill off the node -itself, this helps to avoid leaving processes lying around. The contents of -this file are ignored. 
- """) - f.close() - - def start_client(self): - # this returns a Deferred that fires with the client's control.furl - log.msg("MAKING CLIENT") - # self.testdir is an absolute Unicode path - clientdir = self.clientdir = os.path.join(self.testdir, u"client") - clientdir_str = clientdir.encode(get_filesystem_encoding()) - quiet = StringIO() - create_node.create_node({'basedir': clientdir}, out=quiet) - log.msg("DONE MAKING CLIENT") - write_introducer(clientdir, "default", self.introducer_furl) - # now replace tahoe.cfg - # set webport=0 and then ask the node what port it picked. - f = open(os.path.join(clientdir, "tahoe.cfg"), "w") - f.write("[node]\n" - "web.port = tcp:0:interface=127.0.0.1\n" - "[client]\n" - "shares.happy = 1\n" - "[storage]\n" - ) - - if self.mode in ("upload-self", "receive"): - # accept and store shares, to trigger the memory consumption bugs - pass - else: - # don't accept any shares - f.write("readonly = true\n") - ## also, if we do receive any shares, throw them away - #f.write("debug_discard = true") - if self.mode == "upload-self": - pass - f.close() - self.keepalive_file = os.path.join(clientdir, - client.Client.EXIT_TRIGGER_FILE) - # now start updating the mtime. - self.touch_keepalive() - ts = internet.TimerService(1.0, self.touch_keepalive) - ts.setServiceParent(self.sparent) - - pp = ClientWatcher() - self.proc_done = pp.d = defer.Deferred() - logfile = os.path.join(self.basedir, "client.log") - tahoes = procutils.which("tahoe") - if not tahoes: - raise RuntimeError("unable to find a 'tahoe' executable") - cmd = [tahoes[0], "run", ".", "-l", logfile] - env = os.environ.copy() - self.proc = reactor.spawnProcess(pp, cmd[0], cmd, env, path=clientdir_str) - log.msg("CLIENT STARTED") - - # now we wait for the client to get started. we're looking for the - # control.furl file to appear. - furl_file = os.path.join(clientdir, "private", "control.furl") - url_file = os.path.join(clientdir, "node.url") - def _check(): - if pp.ended and pp.ended.value.status != 0: - # the twistd process ends normally (with rc=0) if the child - # is successfully launched. It ends abnormally (with rc!=0) - # if the child cannot be launched. - raise ChildDidNotStartError("process ended while waiting for startup") - return os.path.exists(furl_file) - d = self.poll(_check, 0.1) - # once it exists, wait a moment before we read from it, just in case - # it hasn't finished writing the whole thing. Ideally control.furl - # would be created in some atomic fashion, or made non-readable until - # it's ready, but I can't think of an easy way to do that, and I - # think the chances that we'll observe a half-write are pretty low. - def _stall(res): - d2 = defer.Deferred() - reactor.callLater(0.1, d2.callback, None) - return d2 - d.addCallback(_stall) - def _read(res): - # read the node's URL - self.webish_url = open(url_file, "r").read().strip() - if self.webish_url[-1] == "/": - # trim trailing slash, since the rest of the code wants it gone - self.webish_url = self.webish_url[:-1] - f = open(furl_file, "r") - furl = f.read() - return furl.strip() - d.addCallback(_read) - return d - - - def kill_client(self): - # returns a Deferred that fires when the process exits. This may only - # be called once. 
- try: - self.proc.signalProcess("INT") - except error.ProcessExitedAlready: - pass - return self.proc_done - - - def create_data(self, name, size): - filename = os.path.join(self.testdir, name + ".data") - f = open(filename, "wb") - block = "a" * 8192 - while size > 0: - l = min(size, 8192) - f.write(block[:l]) - size -= l - return filename - - def stash_stats(self, stats, name): - self.statsfile.write("%s %s: %d\n" % (self.mode, name, stats['VmPeak'])) - self.statsfile.flush() - self.stats[name] = stats['VmPeak'] - - def POST(self, urlpath, **fields): - url = self.webish_url + urlpath - sepbase = "boogabooga" - sep = "--" + sepbase - form = [] - form.append(sep) - form.append('Content-Disposition: form-data; name="_charset"') - form.append('') - form.append('UTF-8') - form.append(sep) - for name, value in fields.iteritems(): - if isinstance(value, tuple): - filename, value = value - form.append('Content-Disposition: form-data; name="%s"; ' - 'filename="%s"' % (name, filename)) - else: - form.append('Content-Disposition: form-data; name="%s"' % name) - form.append('') - form.append(value) - form.append(sep) - form[-1] += "--" - body = "\r\n".join(form) + "\r\n" - headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase, - } - return tw_client.getPage(url, method="POST", postdata=body, - headers=headers, followRedirect=False) - - def GET_discard(self, urlpath, stall): - url = self.webish_url + urlpath + "?filename=dummy-get.out" - return discardPage(url, stall) - - def _print_usage(self, res=None): - d = self.control_rref.callRemote("get_memory_usage") - def _print(stats): - print("VmSize: %9d VmPeak: %9d" % (stats["VmSize"], - stats["VmPeak"])) - return stats - d.addCallback(_print) - return d - - def _do_upload(self, res, size, files, uris): - name = '%d' % size - print() - print("uploading %s" % name) - if self.mode in ("upload", "upload-self"): - d = self.control_rref.callRemote("upload_random_data_from_file", - size, - convergence="check-memory") - elif self.mode == "upload-POST": - data = "a" * size - url = "/uri" - d = self.POST(url, t="upload", file=("%d.data" % size, data)) - elif self.mode in ("receive", - "download", "download-GET", "download-GET-slow"): - # mode=receive: upload the data from a local peer, so that the - # client-under-test receives and stores the shares - # - # mode=download*: upload the data from a local peer, then have - # the client-under-test download it. - # - # we need to wait until the uploading node has connected to all - # peers, since the wait_for_client_connections() above doesn't - # pay attention to our self.nodes[] and their connections. 
- files[name] = self.create_data(name, size) - u = self.nodes[0].getServiceNamed("uploader") - d = self.nodes[0].debug_wait_for_client_connections(self.numnodes+1) - d.addCallback(lambda res: - u.upload(upload.FileName(files[name], - convergence="check-memory"))) - d.addCallback(lambda results: results.get_uri()) - else: - raise ValueError("unknown mode=%s" % self.mode) - def _complete(uri): - uris[name] = uri - print("uploaded %s" % name) - d.addCallback(_complete) - return d - - def _do_download(self, res, size, uris): - if self.mode not in ("download", "download-GET", "download-GET-slow"): - return - name = '%d' % size - print("downloading %s" % name) - uri = uris[name] - - if self.mode == "download": - d = self.control_rref.callRemote("download_to_tempfile_and_delete", - uri) - elif self.mode == "download-GET": - url = "/uri/%s" % uri - d = self.GET_discard(urllib.quote(url), stall=False) - elif self.mode == "download-GET-slow": - url = "/uri/%s" % uri - d = self.GET_discard(urllib.quote(url), stall=True) - - def _complete(res): - print("downloaded %s" % name) - return res - d.addCallback(_complete) - return d - - def do_test(self): - #print("CLIENT STARTED") - #print("FURL", self.control_furl) - #print("RREF", self.control_rref) - #print() - kB = 1000; MB = 1000*1000 - files = {} - uris = {} - - d = self._print_usage() - d.addCallback(self.stash_stats, "0B") - - for i in range(10): - d.addCallback(self._do_upload, 10*kB+i, files, uris) - d.addCallback(self._do_download, 10*kB+i, uris) - d.addCallback(self._print_usage) - d.addCallback(self.stash_stats, "10kB") - - for i in range(3): - d.addCallback(self._do_upload, 10*MB+i, files, uris) - d.addCallback(self._do_download, 10*MB+i, uris) - d.addCallback(self._print_usage) - d.addCallback(self.stash_stats, "10MB") - - for i in range(1): - d.addCallback(self._do_upload, 50*MB+i, files, uris) - d.addCallback(self._do_download, 50*MB+i, uris) - d.addCallback(self._print_usage) - d.addCallback(self.stash_stats, "50MB") - - #for i in range(1): - # d.addCallback(self._do_upload, 100*MB+i, files, uris) - # d.addCallback(self._do_download, 100*MB+i, uris) - # d.addCallback(self._print_usage) - #d.addCallback(self.stash_stats, "100MB") - - #d.addCallback(self.stall) - def _done(res): - print("FINISHING") - d.addCallback(_done) - return d - - def stall(self, res): - d = defer.Deferred() - reactor.callLater(5, d.callback, None) - return d - - -class ClientWatcher(protocol.ProcessProtocol, object): - ended = False - def outReceived(self, data): - print("OUT:", data) - def errReceived(self, data): - print("ERR:", data) - def processEnded(self, reason): - self.ended = reason - self.d.callback(None) - - -if __name__ == '__main__': - mode = "upload" - if len(sys.argv) > 1: - mode = sys.argv[1] - if sys.maxsize == 2147483647: - bits = "32" - elif sys.maxsize == 9223372036854775807: - bits = "64" - else: - bits = "?" - print("%s-bit system (sys.maxsize=%d)" % (bits, sys.maxsize)) - # put the logfile and stats.out in _test_memory/ . These stick around. - # put the nodes and other files in _test_memory/test/ . These are - # removed each time we run. 
- sf = SystemFramework("_test_memory", mode) - sf.run() diff --git a/misc/checkers/check_speed.py b/misc/checkers/check_speed.py deleted file mode 100644 index 2fce53387..000000000 --- a/misc/checkers/check_speed.py +++ /dev/null @@ -1,234 +0,0 @@ -from __future__ import print_function - -import os, sys -from twisted.internet import reactor, defer -from twisted.python import log -from twisted.application import service -from foolscap.api import Tub, fireEventually - -MB = 1000000 - -class SpeedTest(object): - DO_IMMUTABLE = True - DO_MUTABLE_CREATE = True - DO_MUTABLE = True - - def __init__(self, test_client_dir): - #self.real_stderr = sys.stderr - log.startLogging(open("st.log", "a"), setStdout=False) - f = open(os.path.join(test_client_dir, "private", "control.furl"), "r") - self.control_furl = f.read().strip() - f.close() - self.base_service = service.MultiService() - self.failed = None - self.upload_times = {} - self.download_times = {} - - def run(self): - print("STARTING") - d = fireEventually() - d.addCallback(lambda res: self.setUp()) - d.addCallback(lambda res: self.do_test()) - d.addBoth(self.tearDown) - def _err(err): - self.failed = err - log.err(err) - print(err) - d.addErrback(_err) - def _done(res): - reactor.stop() - return res - d.addBoth(_done) - reactor.run() - if self.failed: - print("EXCEPTION") - print(self.failed) - sys.exit(1) - - def setUp(self): - self.base_service.startService() - self.tub = Tub() - self.tub.setOption("expose-remote-exception-types", False) - self.tub.setServiceParent(self.base_service) - d = self.tub.getReference(self.control_furl) - def _gotref(rref): - self.client_rref = rref - print("Got Client Control reference") - return self.stall(5) - d.addCallback(_gotref) - return d - - def stall(self, delay, result=None): - d = defer.Deferred() - reactor.callLater(delay, d.callback, result) - return d - - def record_times(self, times, key): - print("TIME (%s): %s up, %s down" % (key, times[0], times[1])) - self.upload_times[key], self.download_times[key] = times - - def one_test(self, res, name, count, size, mutable): - # values for 'mutable': - # False (upload a different CHK file for each 'count') - # "create" (upload different contents into a new SSK file) - # "upload" (upload different contents into the same SSK file. 
The - # time consumed does not include the creation of the file) - d = self.client_rref.callRemote("speed_test", count, size, mutable) - d.addCallback(self.record_times, name) - return d - - def measure_rtt(self, res): - # use RIClient.get_nodeid() to measure the foolscap-level RTT - d = self.client_rref.callRemote("measure_peer_response_time") - def _got(res): - assert len(res) # need at least one peer - times = res.values() - self.total_rtt = sum(times) - self.average_rtt = sum(times) / len(times) - self.max_rtt = max(times) - print("num-peers: %d" % len(times)) - print("total-RTT: %f" % self.total_rtt) - print("average-RTT: %f" % self.average_rtt) - print("max-RTT: %f" % self.max_rtt) - d.addCallback(_got) - return d - - def do_test(self): - print("doing test") - d = defer.succeed(None) - d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one - d.addCallback(self.measure_rtt) - - if self.DO_IMMUTABLE: - # immutable files - d.addCallback(self.one_test, "1x 200B", 1, 200, False) - d.addCallback(self.one_test, "10x 200B", 10, 200, False) - def _maybe_do_100x_200B(res): - if self.upload_times["10x 200B"] < 5: - print("10x 200B test went too fast, doing 100x 200B test") - return self.one_test(None, "100x 200B", 100, 200, False) - return - d.addCallback(_maybe_do_100x_200B) - d.addCallback(self.one_test, "1MB", 1, 1*MB, False) - d.addCallback(self.one_test, "10MB", 1, 10*MB, False) - def _maybe_do_100MB(res): - if self.upload_times["10MB"] > 30: - print("10MB test took too long, skipping 100MB test") - return - return self.one_test(None, "100MB", 1, 100*MB, False) - d.addCallback(_maybe_do_100MB) - - if self.DO_MUTABLE_CREATE: - # mutable file creation - d.addCallback(self.one_test, "10x 200B SSK creation", 10, 200, - "create") - - if self.DO_MUTABLE: - # mutable file upload/download - d.addCallback(self.one_test, "10x 200B SSK", 10, 200, "upload") - def _maybe_do_100x_200B_SSK(res): - if self.upload_times["10x 200B SSK"] < 5: - print("10x 200B SSK test went too fast, doing 100x 200B SSK") - return self.one_test(None, "100x 200B SSK", 100, 200, - "upload") - return - d.addCallback(_maybe_do_100x_200B_SSK) - d.addCallback(self.one_test, "1MB SSK", 1, 1*MB, "upload") - - d.addCallback(self.calculate_speeds) - return d - - def calculate_speeds(self, res): - # time = A*size+B - # we assume that A*200bytes is negligible - - if self.DO_IMMUTABLE: - # upload - if "100x 200B" in self.upload_times: - B = self.upload_times["100x 200B"] / 100 - else: - B = self.upload_times["10x 200B"] / 10 - print("upload per-file time: %.3fs" % B) - print("upload per-file times-avg-RTT: %f" % (B / self.average_rtt)) - print("upload per-file times-total-RTT: %f" % (B / self.total_rtt)) - A1 = 1*MB / (self.upload_times["1MB"] - B) # in bytes per second - print("upload speed (1MB):", self.number(A1, "Bps")) - A2 = 10*MB / (self.upload_times["10MB"] - B) - print("upload speed (10MB):", self.number(A2, "Bps")) - if "100MB" in self.upload_times: - A3 = 100*MB / (self.upload_times["100MB"] - B) - print("upload speed (100MB):", self.number(A3, "Bps")) - - # download - if "100x 200B" in self.download_times: - B = self.download_times["100x 200B"] / 100 - else: - B = self.download_times["10x 200B"] / 10 - print("download per-file time: %.3fs" % B) - print("download per-file times-avg-RTT: %f" % (B / self.average_rtt)) - print("download per-file times-total-RTT: %f" % (B / self.total_rtt)) - A1 = 1*MB / (self.download_times["1MB"] - B) # in bytes per second - print("download speed (1MB):", self.number(A1, 
"Bps")) - A2 = 10*MB / (self.download_times["10MB"] - B) - print("download speed (10MB):", self.number(A2, "Bps")) - if "100MB" in self.download_times: - A3 = 100*MB / (self.download_times["100MB"] - B) - print("download speed (100MB):", self.number(A3, "Bps")) - - if self.DO_MUTABLE_CREATE: - # SSK creation - B = self.upload_times["10x 200B SSK creation"] / 10 - print("create per-file time SSK: %.3fs" % B) - - if self.DO_MUTABLE: - # upload SSK - if "100x 200B SSK" in self.upload_times: - B = self.upload_times["100x 200B SSK"] / 100 - else: - B = self.upload_times["10x 200B SSK"] / 10 - print("upload per-file time SSK: %.3fs" % B) - A1 = 1*MB / (self.upload_times["1MB SSK"] - B) # in bytes per second - print("upload speed SSK (1MB):", self.number(A1, "Bps")) - - # download SSK - if "100x 200B SSK" in self.download_times: - B = self.download_times["100x 200B SSK"] / 100 - else: - B = self.download_times["10x 200B SSK"] / 10 - print("download per-file time SSK: %.3fs" % B) - A1 = 1*MB / (self.download_times["1MB SSK"] - B) # in bytes per - # second - print("download speed SSK (1MB):", self.number(A1, "Bps")) - - def number(self, value, suffix=""): - scaling = 1 - if value < 1: - fmt = "%1.2g%s" - elif value < 100: - fmt = "%.1f%s" - elif value < 1000: - fmt = "%d%s" - elif value < 1e6: - fmt = "%.2fk%s"; scaling = 1e3 - elif value < 1e9: - fmt = "%.2fM%s"; scaling = 1e6 - elif value < 1e12: - fmt = "%.2fG%s"; scaling = 1e9 - elif value < 1e15: - fmt = "%.2fT%s"; scaling = 1e12 - elif value < 1e18: - fmt = "%.2fP%s"; scaling = 1e15 - else: - fmt = "huge! %g%s" - return fmt % (value / scaling, suffix) - - def tearDown(self, res): - d = self.base_service.stopService() - d.addCallback(lambda ignored: res) - return d - - -if __name__ == '__main__': - test_client_dir = sys.argv[1] - st = SpeedTest(test_client_dir) - st.run() diff --git a/misc/operations_helpers/getmem.py b/misc/operations_helpers/getmem.py deleted file mode 100644 index b3c6285fe..000000000 --- a/misc/operations_helpers/getmem.py +++ /dev/null @@ -1,20 +0,0 @@ -#! /usr/bin/env python - -from __future__ import print_function - -from foolscap import Tub -from foolscap.eventual import eventually -import sys -from twisted.internet import reactor - -def go(): - t = Tub() - d = t.getReference(sys.argv[1]) - d.addCallback(lambda rref: rref.callRemote("get_memory_usage")) - def _got(res): - print(res) - reactor.stop() - d.addCallback(_got) - -eventually(go) -reactor.run() diff --git a/misc/python3/Makefile b/misc/python3/Makefile deleted file mode 100644 index f0ef8b12a..000000000 --- a/misc/python3/Makefile +++ /dev/null @@ -1,53 +0,0 @@ -# Python 3 porting targets -# -# NOTE: this Makefile requires GNU make - -### Defensive settings for make: -# https://tech.davis-hansson.com/p/make/ -SHELL := bash -.ONESHELL: -.SHELLFLAGS := -xeu -o pipefail -c -.SILENT: -.DELETE_ON_ERROR: -MAKEFLAGS += --warn-undefined-variables -MAKEFLAGS += --no-builtin-rules - - -# Top-level, phony targets - -.PHONY: default -default: - @echo "no default target" - -.PHONY: test-py3-all-before -## Log the output of running all tests under Python 3 before changes -test-py3-all-before: ../../.tox/make-test-py3-all-old.log -.PHONY: test-py3-all-diff -## Compare the output of running all tests under Python 3 after changes -test-py3-all-diff: ../../.tox/make-test-py3-all.diff - - -# Real targets - -# Gauge the impact of changes on Python 3 compatibility -# Compare the output from running all tests under Python 3 before and after changes. 
-# Before changes: -# `$ rm -f .tox/make-test-py3-all-*.log && make .tox/make-test-py3-all-old.log` -# After changes: -# `$ make .tox/make-test-py3-all.diff` -$(foreach side,old new,../../.tox/make-test-py3-all-$(side).log): - cd "../../" - tox --develop --notest -e py36-coverage - (make VIRTUAL_ENV=./.tox/py36-coverage TEST_SUITE=allmydata \ - test-venv-coverage || true) | \ - sed -E 's/\([0-9]+\.[0-9]{3} secs\)/(#.### secs)/' | \ - tee "./misc/python3/$(@)" -../../.tox/make-test-py3-all.diff: ../../.tox/make-test-py3-all-new.log - (diff -u "$(<:%-new.log=%-old.log)" "$(<)" || true) | tee "$(@)" - -# Locate modules that are candidates for naively converting `unicode` -> `str`. -# List all Python source files that reference `unicode` but don't reference `str` -../../.tox/py3-unicode-no-str.ls: - cd "../../" - find src -type f -iname '*.py' -exec grep -l -E '\Wunicode\W' '{}' ';' | \ - xargs grep -L '\Wstr\W' | xargs ls -ld | tee "./misc/python3/$(@)" diff --git a/newsfragments/1549.installation b/newsfragments/1549.installation deleted file mode 100644 index cbb91cea5..000000000 --- a/newsfragments/1549.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now requires Twisted 19.10.0 or newer. As a result, it now has a transitive dependency on bcrypt. diff --git a/newsfragments/3037.other b/newsfragments/3037.other deleted file mode 100644 index 947dc8f60..000000000 --- a/newsfragments/3037.other +++ /dev/null @@ -1 +0,0 @@ -The "Great Black Swamp" proposed specification has been expanded to include two lease management APIs. \ No newline at end of file diff --git a/newsfragments/3326.installation b/newsfragments/3326.installation deleted file mode 100644 index 2a3a64e32..000000000 --- a/newsfragments/3326.installation +++ /dev/null @@ -1 +0,0 @@ -Debian 8 support has been replaced with Debian 10 support. diff --git a/newsfragments/3399.feature b/newsfragments/3399.feature deleted file mode 100644 index d30a91679..000000000 --- a/newsfragments/3399.feature +++ /dev/null @@ -1 +0,0 @@ -Added 'typechecks' environment for tox running mypy and performing static typechecks. diff --git a/newsfragments/3404.minor b/newsfragments/3404.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3428.minor b/newsfragments/3428.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3432.minor b/newsfragments/3432.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3433.installation b/newsfragments/3433.installation deleted file mode 100644 index 3c06e53d3..000000000 --- a/newsfragments/3433.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS no longer depends on Nevow. 
\ No newline at end of file diff --git a/newsfragments/3434.minor b/newsfragments/3434.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3435.minor b/newsfragments/3435.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3454.minor b/newsfragments/3454.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3459.minor b/newsfragments/3459.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3460.minor b/newsfragments/3460.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3465.minor b/newsfragments/3465.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3466.minor b/newsfragments/3466.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3467.minor b/newsfragments/3467.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3468.minor b/newsfragments/3468.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3470.minor b/newsfragments/3470.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3471.minor b/newsfragments/3471.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3472.minor b/newsfragments/3472.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3473.minor b/newsfragments/3473.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3474.minor b/newsfragments/3474.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3475.minor b/newsfragments/3475.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3477.minor b/newsfragments/3477.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3478.minor b/newsfragments/3478.minor deleted file mode 100644 index 8b1378917..000000000 --- a/newsfragments/3478.minor +++ /dev/null @@ -1 +0,0 @@ - diff --git a/newsfragments/3479.minor b/newsfragments/3479.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3481.minor b/newsfragments/3481.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3482.minor b/newsfragments/3482.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3483.minor b/newsfragments/3483.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3485.minor b/newsfragments/3485.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3486.installation b/newsfragments/3486.installation deleted file mode 100644 index 7b24956b2..000000000 --- a/newsfragments/3486.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now requires the `netifaces` Python package and no longer requires the external `ip`, `ifconfig`, or `route.exe` executables. 
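As an aside on the ``netifaces`` change noted just above (newsfragments/3486.installation): the following is a minimal sketch of enumerating local addresses with the ``netifaces`` package rather than shelling out to ``ip``, ``ifconfig``, or ``route.exe``. The helper name and the restriction to IPv4 are illustrative assumptions, not the actual Tahoe-LAFS implementation::

    # Illustrative only: list local IPv4 addresses via the netifaces package,
    # the pure-Python replacement for external ip/ifconfig/route.exe calls.
    import netifaces

    def local_ipv4_addresses():
        """Return every IPv4 address bound to a local interface."""
        addresses = []
        for iface in netifaces.interfaces():
            # ifaddresses() maps address families to lists of address dicts;
            # AF_INET may be absent on interfaces with no IPv4 address.
            for entry in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
                addresses.append(entry["addr"])
        return addresses

    if __name__ == "__main__":
        print(local_ipv4_addresses())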
diff --git a/newsfragments/3488.minor b/newsfragments/3488.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3490.minor b/newsfragments/3490.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3491.minor b/newsfragments/3491.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3492.minor b/newsfragments/3492.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3493.minor b/newsfragments/3493.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3496.minor b/newsfragments/3496.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3497.installation b/newsfragments/3497.installation deleted file mode 100644 index 4a50be97e..000000000 --- a/newsfragments/3497.installation +++ /dev/null @@ -1 +0,0 @@ -The Tahoe-LAFS project no longer commits to maintaining binary packages for all dependencies at . Please use PyPI instead. diff --git a/newsfragments/3499.minor b/newsfragments/3499.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3500.minor b/newsfragments/3500.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3501.minor b/newsfragments/3501.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3502.minor b/newsfragments/3502.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3503.other b/newsfragments/3503.other deleted file mode 100644 index 5d0c681b6..000000000 --- a/newsfragments/3503.other +++ /dev/null @@ -1 +0,0 @@ -The specification section of the Tahoe-LAFS documentation now includes explicit discussion of the security properties of Foolscap "fURLs" on which it depends. diff --git a/newsfragments/3504.configuration b/newsfragments/3504.configuration deleted file mode 100644 index 9ff74482c..000000000 --- a/newsfragments/3504.configuration +++ /dev/null @@ -1 +0,0 @@ -The ``[client]introducer.furl`` configuration item is now deprecated in favor of the ``private/introducers.yaml`` file. \ No newline at end of file diff --git a/newsfragments/3509.bugfix b/newsfragments/3509.bugfix deleted file mode 100644 index 4d633feab..000000000 --- a/newsfragments/3509.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression that broke flogtool results on Python 2. \ No newline at end of file diff --git a/newsfragments/3510.bugfix b/newsfragments/3510.bugfix deleted file mode 100644 index d4a2bd5dc..000000000 --- a/newsfragments/3510.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a logging regression on Python 2 involving unicode strings. 
\ No newline at end of file diff --git a/newsfragments/3511.minor b/newsfragments/3511.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3513.minor b/newsfragments/3513.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3514.minor b/newsfragments/3514.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3515.minor b/newsfragments/3515.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3517.minor b/newsfragments/3517.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3518.removed b/newsfragments/3518.removed deleted file mode 100644 index 460af5142..000000000 --- a/newsfragments/3518.removed +++ /dev/null @@ -1 +0,0 @@ -Announcements delivered through the introducer system are no longer automatically annotated with copious information about the Tahoe-LAFS software version nor the versions of its dependencies. diff --git a/newsfragments/3520.minor b/newsfragments/3520.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3521.minor b/newsfragments/3521.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3522.minor b/newsfragments/3522.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3523.minor b/newsfragments/3523.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3524.minor b/newsfragments/3524.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3525.minor b/newsfragments/3525.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3528.minor b/newsfragments/3528.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3529.minor b/newsfragments/3529.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3532.minor b/newsfragments/3532.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3533.minor b/newsfragments/3533.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3534.minor b/newsfragments/3534.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3536.minor b/newsfragments/3536.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3537.minor b/newsfragments/3537.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3539.bugfix b/newsfragments/3539.bugfix deleted file mode 100644 index ed4aeb9af..000000000 --- a/newsfragments/3539.bugfix +++ /dev/null @@ -1 +0,0 @@ -Certain implementation-internal weakref KeyErrors are now handled and should no longer cause user-initiated operations to fail. diff --git a/newsfragments/3542.minor b/newsfragments/3542.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3544.minor b/newsfragments/3544.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3545.other b/newsfragments/3545.other deleted file mode 100644 index fd8adc37b..000000000 --- a/newsfragments/3545.other +++ /dev/null @@ -1 +0,0 @@ -The README, revised by Viktoriia with feedback from the team, is now more focused on the developer community and provides more information about Tahoe-LAFS, why it's important, and how someone can use it or start contributing to it. 
\ No newline at end of file diff --git a/newsfragments/3546.minor b/newsfragments/3546.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3547.minor b/newsfragments/3547.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3549.removed b/newsfragments/3549.removed deleted file mode 100644 index 53c7a7de1..000000000 --- a/newsfragments/3549.removed +++ /dev/null @@ -1 +0,0 @@ -The stats gatherer, broken since at least Tahoe-LAFS 1.13.0, has been removed. The ``[client]stats_gatherer.furl`` configuration item in ``tahoe.cfg`` is no longer allowed. The Tahoe-LAFS project recommends using a third-party metrics aggregation tool instead. diff --git a/newsfragments/3550.removed b/newsfragments/3550.removed deleted file mode 100644 index 2074bf676..000000000 --- a/newsfragments/3550.removed +++ /dev/null @@ -1 +0,0 @@ -The deprecated ``tahoe`` start, restart, stop, and daemonize sub-commands have been removed. \ No newline at end of file diff --git a/newsfragments/3551.minor b/newsfragments/3551.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3552.minor b/newsfragments/3552.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3553.minor b/newsfragments/3553.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3555.minor b/newsfragments/3555.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3557.minor b/newsfragments/3557.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3558.minor b/newsfragments/3558.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3560.minor b/newsfragments/3560.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3563.minor b/newsfragments/3563.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3564.minor b/newsfragments/3564.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3565.minor b/newsfragments/3565.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3566.minor b/newsfragments/3566.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3567.minor b/newsfragments/3567.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3568.minor b/newsfragments/3568.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3572.minor b/newsfragments/3572.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3574.minor b/newsfragments/3574.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3575.minor b/newsfragments/3575.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3576.minor b/newsfragments/3576.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3577.minor b/newsfragments/3577.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3578.minor b/newsfragments/3578.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3579.minor b/newsfragments/3579.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3580.minor b/newsfragments/3580.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3582.minor b/newsfragments/3582.minor deleted file mode 
100644 index e69de29bb..000000000 diff --git a/newsfragments/3583.removed b/newsfragments/3583.removed deleted file mode 100644 index a3fce48be..000000000 --- a/newsfragments/3583.removed +++ /dev/null @@ -1 +0,0 @@ -FTP is no longer supported by Tahoe-LAFS. Please use the SFTP support instead. \ No newline at end of file diff --git a/newsfragments/3584.bugfix b/newsfragments/3584.bugfix deleted file mode 100644 index faf57713b..000000000 --- a/newsfragments/3584.bugfix +++ /dev/null @@ -1 +0,0 @@ -SFTP public key auth likely works more consistently, and SFTP in general was previously broken. \ No newline at end of file diff --git a/newsfragments/3587.minor b/newsfragments/3587.minor deleted file mode 100644 index 8b1378917..000000000 --- a/newsfragments/3587.minor +++ /dev/null @@ -1 +0,0 @@ - diff --git a/newsfragments/3588.incompat b/newsfragments/3588.incompat deleted file mode 100644 index 402ae8479..000000000 --- a/newsfragments/3588.incompat +++ /dev/null @@ -1 +0,0 @@ -The Tahoe command line now always uses UTF-8 to decode its arguments, regardless of locale. diff --git a/newsfragments/3588.minor b/newsfragments/3588.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3589.minor b/newsfragments/3589.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3590.bugfix b/newsfragments/3590.bugfix deleted file mode 100644 index aa504a5e3..000000000 --- a/newsfragments/3590.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed issue where redirecting old-style URIs (/uri/?uri=...) didn't work. \ No newline at end of file diff --git a/newsfragments/3591.minor b/newsfragments/3591.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3592.minor b/newsfragments/3592.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3593.minor b/newsfragments/3593.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3594.minor b/newsfragments/3594.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3595.minor b/newsfragments/3595.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3596.minor b/newsfragments/3596.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3599.minor b/newsfragments/3599.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3600.minor b/newsfragments/3600.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3603.minor.rst b/newsfragments/3603.minor.rst deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3605.minor b/newsfragments/3605.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3606.minor b/newsfragments/3606.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3607.minor b/newsfragments/3607.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3608.minor b/newsfragments/3608.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3611.minor b/newsfragments/3611.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3612.minor b/newsfragments/3612.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3613.minor b/newsfragments/3613.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3615.minor b/newsfragments/3615.minor deleted 
file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3616.minor b/newsfragments/3616.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3617.minor b/newsfragments/3617.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3618.minor b/newsfragments/3618.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3619.minor b/newsfragments/3619.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3620.minor b/newsfragments/3620.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3621.minor b/newsfragments/3621.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3623.minor b/newsfragments/3623.minor deleted file mode 100644 index 8b1378917..000000000 --- a/newsfragments/3623.minor +++ /dev/null @@ -1 +0,0 @@ - diff --git a/newsfragments/3624.minor b/newsfragments/3624.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3625.minor b/newsfragments/3625.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3626.minor b/newsfragments/3626.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3628.minor b/newsfragments/3628.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3629.feature b/newsfragments/3629.feature deleted file mode 100644 index cdca48a18..000000000 --- a/newsfragments/3629.feature +++ /dev/null @@ -1 +0,0 @@ -The NixOS-packaged Tahoe-LAFS now knows its own version. diff --git a/newsfragments/3630.minor b/newsfragments/3630.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3631.minor b/newsfragments/3631.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3632.minor b/newsfragments/3632.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3633.installation b/newsfragments/3633.installation deleted file mode 100644 index 8f6d7efdd..000000000 --- a/newsfragments/3633.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now uses a forked version of txi2p (named txi2p-tahoe) with Python 3 support. diff --git a/newsfragments/3634.minor b/newsfragments/3634.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3635.minor b/newsfragments/3635.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3637.minor b/newsfragments/3637.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3638.minor b/newsfragments/3638.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3640.minor b/newsfragments/3640.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3642.minor b/newsfragments/3642.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3644.other b/newsfragments/3644.other deleted file mode 100644 index 4b159e45d..000000000 --- a/newsfragments/3644.other +++ /dev/null @@ -1 +0,0 @@ -The "Great Black Swamp" proposed specification has been changed use ``v=1`` as the URL version identifier. 
\ No newline at end of file diff --git a/newsfragments/3645.minor b/newsfragments/3645.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3646.minor b/newsfragments/3646.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3647.minor b/newsfragments/3647.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3648.minor b/newsfragments/3648.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3649.minor b/newsfragments/3649.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3650.bugfix b/newsfragments/3650.bugfix deleted file mode 100644 index 09a810239..000000000 --- a/newsfragments/3650.bugfix +++ /dev/null @@ -1 +0,0 @@ -``tahoe invite`` will now read share encoding/placement configuration values from a Tahoe client node configuration file if they are not given on the command line, instead of raising an unhandled exception. diff --git a/newsfragments/3651.minor b/newsfragments/3651.minor deleted file mode 100644 index 9a2f5a0ed..000000000 --- a/newsfragments/3651.minor +++ /dev/null @@ -1 +0,0 @@ -We added documentation detailing the project's ticket triage process diff --git a/newsfragments/3652.removed b/newsfragments/3652.removed deleted file mode 100644 index a3e964702..000000000 --- a/newsfragments/3652.removed +++ /dev/null @@ -1 +0,0 @@ -Removed support for the Account Server frontend authentication type. diff --git a/newsfragments/3653.minor b/newsfragments/3653.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3654.minor b/newsfragments/3654.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3655.minor b/newsfragments/3655.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3656.minor b/newsfragments/3656.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3657.minor b/newsfragments/3657.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3658.minor b/newsfragments/3658.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3659.documentation b/newsfragments/3659.documentation deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3662.minor b/newsfragments/3662.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3663.other b/newsfragments/3663.other deleted file mode 100644 index 62abf2666..000000000 --- a/newsfragments/3663.other +++ /dev/null @@ -1 +0,0 @@ -You can run `make livehtml` in docs directory to invoke sphinx-autobuild. diff --git a/newsfragments/3664.documentation b/newsfragments/3664.documentation deleted file mode 100644 index ab5de8884..000000000 --- a/newsfragments/3664.documentation +++ /dev/null @@ -1 +0,0 @@ -Documentation now has its own towncrier category. diff --git a/newsfragments/3666.documentation b/newsfragments/3666.documentation deleted file mode 100644 index 3f9e34777..000000000 --- a/newsfragments/3666.documentation +++ /dev/null @@ -1 +0,0 @@ -`tox -e docs` will treat warnings about docs as errors. 
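For context on the ``tahoe invite`` fix recorded earlier in this hunk (newsfragments/3650.bugfix): the sketch below shows only the general pattern of preferring command-line values and falling back to a client node's ``tahoe.cfg`` instead of raising an unhandled exception. The option names ``shares.needed``, ``shares.happy``, and ``shares.total`` are the standard ``[client]`` settings; the helper function and the 3/7/10 defaults are illustrative, not the actual ``tahoe invite`` code::

    # Illustrative sketch: CLI-supplied values win; otherwise consult the
    # node's tahoe.cfg [client] section, with sensible defaults as a last resort.
    from configparser import ConfigParser
    import os

    def encoding_parameters(basedir, needed=None, happy=None, total=None):
        cfg = ConfigParser()
        cfg.read(os.path.join(basedir, "tahoe.cfg"))

        def pick(cli_value, option, default):
            # A value given on the command line takes precedence.
            if cli_value is not None:
                return int(cli_value)
            return cfg.getint("client", option, fallback=default)

        return {
            "needed": pick(needed, "shares.needed", 3),
            "happy": pick(happy, "shares.happy", 7),
            "total": pick(total, "shares.total", 10),
        }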
diff --git a/newsfragments/3667.minor b/newsfragments/3667.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3669.minor b/newsfragments/3669.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3670.minor b/newsfragments/3670.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3671.minor b/newsfragments/3671.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3672.minor b/newsfragments/3672.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3674.minor b/newsfragments/3674.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3675.minor b/newsfragments/3675.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3676.minor b/newsfragments/3676.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3677.documentation b/newsfragments/3677.documentation deleted file mode 100644 index 51730e765..000000000 --- a/newsfragments/3677.documentation +++ /dev/null @@ -1 +0,0 @@ -The visibility of the Tahoe-LAFS logo has been improved for "dark" themed viewing. diff --git a/newsfragments/3678.minor b/newsfragments/3678.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3679.minor b/newsfragments/3679.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3681.minor b/newsfragments/3681.minor deleted file mode 100644 index bc84b6b8f..000000000 --- a/newsfragments/3681.minor +++ /dev/null @@ -1,8 +0,0 @@ -(The below text is no longer valid: netifaces has released a 64-bit -Python 2.7 wheel for Windows. Ticket #3733 made the switch in CI. We -should be able to test and run Tahoe-LAFS without needing vcpython27 -now.) - -Tahoe-LAFS CI now runs tests only on 32-bit Windows. Microsoft has -removed vcpython27 compiler downloads from their site, and Tahoe-LAFS -needs vcpython27 to build and install netifaces on 64-bit Windows. 
diff --git a/newsfragments/3682.documentation b/newsfragments/3682.documentation deleted file mode 100644 index 5cf78bd90..000000000 --- a/newsfragments/3682.documentation +++ /dev/null @@ -1 +0,0 @@ -A cheatsheet-style document for contributors was created at CONTRIBUTORS.rst \ No newline at end of file diff --git a/newsfragments/3683.minor b/newsfragments/3683.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3686.minor b/newsfragments/3686.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3687.minor b/newsfragments/3687.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3691.minor b/newsfragments/3691.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3692.minor b/newsfragments/3692.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3699.minor b/newsfragments/3699.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3700.minor b/newsfragments/3700.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3701.minor b/newsfragments/3701.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3702.minor b/newsfragments/3702.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3703.minor b/newsfragments/3703.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3704.minor b/newsfragments/3704.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3705.minor b/newsfragments/3705.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3707.minor b/newsfragments/3707.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3708.minor b/newsfragments/3708.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3709.minor b/newsfragments/3709.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3711.minor b/newsfragments/3711.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3712.installation b/newsfragments/3712.installation deleted file mode 100644 index b80e1558b..000000000 --- a/newsfragments/3712.installation +++ /dev/null @@ -1 +0,0 @@ -The Nix package now includes correct version information. \ No newline at end of file diff --git a/newsfragments/3713.minor b/newsfragments/3713.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3714.minor b/newsfragments/3714.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3715.minor b/newsfragments/3715.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3716.incompat b/newsfragments/3716.incompat deleted file mode 100644 index aa03eea47..000000000 --- a/newsfragments/3716.incompat +++ /dev/null @@ -1 +0,0 @@ -tahoe backup's --exclude-from has been renamed to --exclude-from-utf-8, and correspondingly requires the file to be UTF-8 encoded. 
\ No newline at end of file diff --git a/newsfragments/3717.minor b/newsfragments/3717.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3718.minor b/newsfragments/3718.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3721.documentation b/newsfragments/3721.documentation deleted file mode 100644 index 36ae33236..000000000 --- a/newsfragments/3721.documentation +++ /dev/null @@ -1 +0,0 @@ -Our IRC channel, #tahoe-lafs, has been moved to irc.libera.chat. diff --git a/newsfragments/3722.minor b/newsfragments/3722.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3723.minor b/newsfragments/3723.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3726.documentation b/newsfragments/3726.documentation deleted file mode 100644 index fb94fff32..000000000 --- a/newsfragments/3726.documentation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS project is now registered with Libera.Chat IRC network. diff --git a/newsfragments/3727.minor b/newsfragments/3727.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3728.minor b/newsfragments/3728.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3729.minor b/newsfragments/3729.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3730.minor b/newsfragments/3730.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3731.minor b/newsfragments/3731.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3732.minor b/newsfragments/3732.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3733.installation b/newsfragments/3733.installation deleted file mode 100644 index c1cac649b..000000000 --- a/newsfragments/3733.installation +++ /dev/null @@ -1 +0,0 @@ -Use netifaces 0.11.0 wheel package from PyPI.org if you use 64-bit Python 2.7 on Windows. VCPython27 downloads are no longer available at Microsoft's website, which has made building Python 2.7 wheel packages of Python libraries with C extensions (such as netifaces) on Windows difficult. diff --git a/newsfragments/3734.minor b/newsfragments/3734.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3735.minor b/newsfragments/3735.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3736.minor b/newsfragments/3736.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3738.bugfix b/newsfragments/3738.bugfix deleted file mode 100644 index 6a4bc1cd9..000000000 --- a/newsfragments/3738.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression where uploading files with non-ASCII names failed. \ No newline at end of file diff --git a/newsfragments/3739.bugfix b/newsfragments/3739.bugfix deleted file mode 100644 index 875941cf8..000000000 --- a/newsfragments/3739.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed annoying UnicodeWarning message on Python 2 when running CLI tools. 
\ No newline at end of file diff --git a/newsfragments/3741.minor b/newsfragments/3741.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3743.minor b/newsfragments/3743.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3744.minor b/newsfragments/3744.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3745.minor b/newsfragments/3745.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3746.minor b/newsfragments/3746.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3747.documentation b/newsfragments/3747.documentation deleted file mode 100644 index a2559a6a0..000000000 --- a/newsfragments/3747.documentation +++ /dev/null @@ -1 +0,0 @@ -Rewriting the installation guide for Tahoe-LAFS. diff --git a/newsfragments/3749.documentation b/newsfragments/3749.documentation deleted file mode 100644 index 554564a0b..000000000 --- a/newsfragments/3749.documentation +++ /dev/null @@ -1 +0,0 @@ -Documentation and installation links in the README have been fixed. diff --git a/newsfragments/3751.minor b/newsfragments/3751.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3757.other b/newsfragments/3757.other deleted file mode 100644 index 3d2d3f272..000000000 --- a/newsfragments/3757.other +++ /dev/null @@ -1 +0,0 @@ -Refactored test_introducer in web tests to use custom base test cases \ No newline at end of file diff --git a/newsfragments/3759.minor b/newsfragments/3759.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3760.minor b/newsfragments/3760.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3763.minor b/newsfragments/3763.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3764.documentation b/newsfragments/3764.documentation deleted file mode 100644 index d473cd27c..000000000 --- a/newsfragments/3764.documentation +++ /dev/null @@ -1 +0,0 @@ -The Great Black Swamp proposed specification now includes sample interactions to demonstrate expected usage patterns. \ No newline at end of file diff --git a/newsfragments/3765.documentation b/newsfragments/3765.documentation deleted file mode 100644 index a3b59c4d6..000000000 --- a/newsfragments/3765.documentation +++ /dev/null @@ -1 +0,0 @@ -The Great Black Swamp proposed specification now includes a glossary. \ No newline at end of file diff --git a/newsfragments/3769.documentation b/newsfragments/3769.documentation deleted file mode 100644 index 3d4ef7d4c..000000000 --- a/newsfragments/3769.documentation +++ /dev/null @@ -1 +0,0 @@ -The Great Black Swamp specification now allows parallel upload of immutable share data. diff --git a/newsfragments/3773.minor b/newsfragments/3773.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3774.documentation b/newsfragments/3774.documentation deleted file mode 100644 index d58105966..000000000 --- a/newsfragments/3774.documentation +++ /dev/null @@ -1 +0,0 @@ -There is now a specification for the scheme which Tahoe-LAFS storage clients use to derive their lease renewal secrets. 
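Regarding the lease renewal secret specification mentioned just above (newsfragments/3774.documentation): the sketch below only illustrates the general shape of such a scheme, deriving a per-server, per-storage-index secret from a client master secret with a tagged hash so that disclosing one derived secret reveals nothing about the others. It is not the construction from the specification; the tag strings and field ordering here are invented for illustration::

    # Generic illustration of tagged-hash secret derivation; NOT the scheme
    # defined in the Tahoe-LAFS specification (tags and ordering are made up).
    import hashlib

    def tagged_hash(tag, *fields):
        h = hashlib.sha256()
        h.update(b"tag:" + tag + b"\n")
        for field in fields:
            # Length-prefix each field so different concatenations cannot collide.
            h.update(len(field).to_bytes(8, "big") + field)
        return h.digest()

    def renewal_secret_for(master_secret, server_id, storage_index):
        # One secret per (server, storage index); each is useless for deriving
        # the master secret or any other per-share secret.
        return tagged_hash(b"lease-renewal", master_secret, server_id, storage_index)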
diff --git a/newsfragments/3777.documentation b/newsfragments/3777.documentation deleted file mode 100644 index 7635cc1e6..000000000 --- a/newsfragments/3777.documentation +++ /dev/null @@ -1 +0,0 @@ -The Great Black Swamp proposed specification now has a simplified interface for reading data from immutable shares. diff --git a/newsfragments/3779.bugfix b/newsfragments/3779.bugfix deleted file mode 100644 index 073046474..000000000 --- a/newsfragments/3779.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed bug where share corruption events were not logged on storage servers running on Windows. \ No newline at end of file diff --git a/newsfragments/3781.minor b/newsfragments/3781.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3782.documentation b/newsfragments/3782.documentation deleted file mode 100644 index 5e5cecc13..000000000 --- a/newsfragments/3782.documentation +++ /dev/null @@ -1 +0,0 @@ -tahoe-dev mailing list is now at tahoe-dev@lists.tahoe-lafs.org. diff --git a/newsfragments/2928.minor b/newsfragments/3783.minor similarity index 100% rename from newsfragments/2928.minor rename to newsfragments/3783.minor diff --git a/newsfragments/3784.minor b/newsfragments/3784.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3785.documentation b/newsfragments/3785.documentation deleted file mode 100644 index 4eb268f79..000000000 --- a/newsfragments/3785.documentation +++ /dev/null @@ -1 +0,0 @@ -The Great Black Swamp specification now describes the required authorization scheme. diff --git a/newsfragments/3786.feature b/newsfragments/3786.feature deleted file mode 100644 index ecbfc0372..000000000 --- a/newsfragments/3786.feature +++ /dev/null @@ -1 +0,0 @@ -tahoe-lafs now provides its statistics also in OpenMetrics format (for Prometheus et. al.) at `/statistics?t=openmetrics`. diff --git a/newsfragments/3792.minor b/newsfragments/3792.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3793.minor b/newsfragments/3793.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3795.minor b/newsfragments/3795.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3797.minor b/newsfragments/3797.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3798.minor b/newsfragments/3798.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3799.minor b/newsfragments/3799.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3801.bugfix b/newsfragments/3801.bugfix deleted file mode 100644 index 504b3999d..000000000 --- a/newsfragments/3801.bugfix +++ /dev/null @@ -1 +0,0 @@ -When uploading an immutable, overlapping writes that include conflicting data are rejected. In practice, this likely didn't happen in real-world usage. \ No newline at end of file diff --git a/newsfragments/3805.minor b/newsfragments/3805.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3806.minor b/newsfragments/3806.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3808.installation b/newsfragments/3808.installation deleted file mode 100644 index 157f08a0c..000000000 --- a/newsfragments/3808.installation +++ /dev/null @@ -1 +0,0 @@ -Tahoe-LAFS now supports running on NixOS 21.05 with Python 3. 
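To make the OpenMetrics feature recorded above (newsfragments/3786.feature) concrete: a small sketch of scraping a node's statistics endpoint. The ``/statistics?t=openmetrics`` path comes from the news entry; the node URL and the crude line parsing are assumptions for illustration (a Prometheus server would normally scrape the endpoint itself, and a real node's URL is written to its ``node.url`` file)::

    # Illustrative scrape of the OpenMetrics statistics endpoint; the default
    # URL below is an assumption, not necessarily where your node listens.
    from urllib.request import urlopen

    def fetch_openmetrics(node_url="http://127.0.0.1:3456"):
        with urlopen(node_url + "/statistics?t=openmetrics") as response:
            text = response.read().decode("utf-8")
        metrics = {}
        for line in text.splitlines():
            if not line.strip() or line.startswith("#"):
                continue  # skip blank lines and HELP/TYPE/EOF comment lines
            name, _, value = line.rpartition(" ")
            metrics[name] = float(value)
        return metrics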
diff --git a/newsfragments/3810.minor b/newsfragments/3810.minor deleted file mode 100644 index e69de29bb..000000000 diff --git a/newsfragments/3902.feature b/newsfragments/3902.feature new file mode 100644 index 000000000..2477d0ae6 --- /dev/null +++ b/newsfragments/3902.feature @@ -0,0 +1 @@ +The new HTTPS-based storage server is now enabled transparently on the same port as the Foolscap server. This will not have any user-facing impact until the HTTPS storage protocol is supported in clients as well. \ No newline at end of file diff --git a/newsfragments/3283.minor b/newsfragments/3904.minor similarity index 100% rename from newsfragments/3283.minor rename to newsfragments/3904.minor diff --git a/newsfragments/3922.documentation b/newsfragments/3922.documentation new file mode 100644 index 000000000..d0232dd02 --- /dev/null +++ b/newsfragments/3922.documentation @@ -0,0 +1 @@ +Several minor errors in the Great Black Swamp proposed specification document have been fixed. \ No newline at end of file diff --git a/newsfragments/3314.minor b/newsfragments/3927.minor similarity index 100% rename from newsfragments/3314.minor rename to newsfragments/3927.minor diff --git a/newsfragments/3384.minor b/newsfragments/3928.minor similarity index 100% rename from newsfragments/3384.minor rename to newsfragments/3928.minor diff --git a/newsfragments/3938.bugfix b/newsfragments/3938.bugfix new file mode 100644 index 000000000..c2778cfdf --- /dev/null +++ b/newsfragments/3938.bugfix @@ -0,0 +1 @@ +Work with (and require) newer versions of pycddl. \ No newline at end of file diff --git a/newsfragments/3385.minor b/newsfragments/3940.minor similarity index 100% rename from newsfragments/3385.minor rename to newsfragments/3940.minor diff --git a/newsfragments/3390.minor b/newsfragments/3944.minor similarity index 100% rename from newsfragments/3390.minor rename to newsfragments/3944.minor diff --git a/nix/autobahn.nix b/nix/autobahn.nix deleted file mode 100644 index 83148c4f8..000000000 --- a/nix/autobahn.nix +++ /dev/null @@ -1,34 +0,0 @@ -{ lib, buildPythonPackage, fetchPypi, isPy3k, - six, txaio, twisted, zope_interface, cffi, futures, - mock, pytest, cryptography, pynacl -}: -buildPythonPackage rec { - pname = "autobahn"; - version = "19.8.1"; - - src = fetchPypi { - inherit pname version; - sha256 = "294e7381dd54e73834354832604ae85567caf391c39363fed0ea2bfa86aa4304"; - }; - - propagatedBuildInputs = [ six txaio twisted zope_interface cffi cryptography pynacl ] ++ - (lib.optionals (!isPy3k) [ futures ]); - - checkInputs = [ mock pytest ]; - checkPhase = '' - runHook preCheck - USE_TWISTED=true py.test $out - runHook postCheck - ''; - - # Tests do no seem to be compatible yet with pytest 5.1 - # https://github.com/crossbario/autobahn-python/issues/1235 - doCheck = false; - - meta = with lib; { - description = "WebSocket and WAMP in Python for Twisted and asyncio."; - homepage = "https://crossbar.io/autobahn"; - license = licenses.mit; - maintainers = with maintainers; [ nand0p ]; - }; -} diff --git a/nix/collections-extended.nix b/nix/collections-extended.nix deleted file mode 100644 index 3f1ad165a..000000000 --- a/nix/collections-extended.nix +++ /dev/null @@ -1,19 +0,0 @@ -{ lib, buildPythonPackage, fetchPypi }: -buildPythonPackage rec { - pname = "collections-extended"; - version = "1.0.3"; - - src = fetchPypi { - inherit pname version; - sha256 = "0lb69x23asd68n0dgw6lzxfclavrp2764xsnh45jm97njdplznkw"; - }; - - # Tests aren't in tarball, for 1.0.3 at least. 
- doCheck = false; - - meta = with lib; { - homepage = https://github.com/mlenzen/collections-extended; - description = "Extra Python Collections - bags (multisets), setlists (unique list / indexed set), RangeMap and IndexedDict"; - license = licenses.asl20; - }; -} diff --git a/nix/default.nix b/nix/default.nix deleted file mode 100644 index bd7460c2f..000000000 --- a/nix/default.nix +++ /dev/null @@ -1,7 +0,0 @@ -# This is the main entrypoint for the Tahoe-LAFS derivation. -{ pkgs ? import { } }: -# Add our Python packages to nixpkgs to simplify the expression for the -# Tahoe-LAFS derivation. -let pkgs' = pkgs.extend (import ./overlays.nix); -# Evaluate the expression for our Tahoe-LAFS derivation. -in pkgs'.python2.pkgs.callPackage ./tahoe-lafs.nix { } diff --git a/nix/eliot.nix b/nix/eliot.nix deleted file mode 100644 index c5975e990..000000000 --- a/nix/eliot.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ lib, buildPythonPackage, fetchPypi, zope_interface, pyrsistent, boltons -, hypothesis, testtools, pytest }: -buildPythonPackage rec { - pname = "eliot"; - version = "1.7.0"; - - src = fetchPypi { - inherit pname version; - sha256 = "0ylyycf717s5qsrx8b9n6m38vyj2k8328lfhn8y6r31824991wv8"; - }; - - postPatch = '' - substituteInPlace setup.py \ - --replace "boltons >= 19.0.1" boltons - ''; - - # A seemingly random subset of the test suite fails intermittently. After - # Tahoe-LAFS is ported to Python 3 we can update to a newer Eliot and, if - # the test suite continues to fail, maybe it will be more likely that we can - # have upstream fix it for us. - doCheck = false; - - checkInputs = [ testtools pytest hypothesis ]; - propagatedBuildInputs = [ zope_interface pyrsistent boltons ]; - - meta = with lib; { - homepage = https://github.com/itamarst/eliot/; - description = "Logging library that tells you why it happened"; - license = licenses.asl20; - }; -} diff --git a/nix/future.nix b/nix/future.nix deleted file mode 100644 index 814b7c1b5..000000000 --- a/nix/future.nix +++ /dev/null @@ -1,35 +0,0 @@ -{ lib -, buildPythonPackage -, fetchPypi -}: - -buildPythonPackage rec { - pname = "future"; - version = "0.18.2"; - - src = fetchPypi { - inherit pname version; - sha256 = "sha256:0zakvfj87gy6mn1nba06sdha63rn4njm7bhh0wzyrxhcny8avgmi"; - }; - - doCheck = false; - - meta = { - description = "Clean single-source support for Python 3 and 2"; - longDescription = '' - python-future is the missing compatibility layer between Python 2 and - Python 3. It allows you to use a single, clean Python 3.x-compatible - codebase to support both Python 2 and Python 3 with minimal overhead. - - It provides future and past packages with backports and forward ports - of features from Python 3 and 2. It also comes with futurize and - pasteurize, customized 2to3-based scripts that helps you to convert - either Py2 or Py3 code easily to support both Python 2 and 3 in a - single clean Py3-style codebase, module by module. - ''; - homepage = https://python-future.org; - downloadPage = https://github.com/PythonCharmers/python-future/releases; - license = with lib.licenses; [ mit ]; - maintainers = with lib.maintainers; [ prikhi ]; - }; -} diff --git a/nix/overlays.nix b/nix/overlays.nix deleted file mode 100644 index fbd0ce3bb..000000000 --- a/nix/overlays.nix +++ /dev/null @@ -1,33 +0,0 @@ -self: super: { - python27 = super.python27.override { - packageOverrides = python-self: python-super: { - # eliot is not part of nixpkgs at all at this time. 
- eliot = python-self.pythonPackages.callPackage ./eliot.nix { }; - - # NixOS autobahn package has trollius as a dependency, although - # it is optional. Trollius is unmaintained and fails on CI. - autobahn = python-super.pythonPackages.callPackage ./autobahn.nix { }; - - # Porting to Python 3 is greatly aided by the future package. A - # slightly newer version than appears in nixos 19.09 is helpful. - future = python-super.pythonPackages.callPackage ./future.nix { }; - - # Need version of pyutil that supports Python 3. The version in 19.09 - # is too old. - pyutil = python-super.pythonPackages.callPackage ./pyutil.nix { }; - - # Need a newer version of Twisted, too. - twisted = python-super.pythonPackages.callPackage ./twisted.nix { }; - - # collections-extended is not part of nixpkgs at this time. - collections-extended = python-super.pythonPackages.callPackage ./collections-extended.nix { }; - }; - }; - - python39 = super.python39.override { - packageOverrides = python-self: python-super: { - # collections-extended is not part of nixpkgs at this time. - collections-extended = python-super.pythonPackages.callPackage ./collections-extended.nix { }; - }; - }; -} diff --git a/nix/py3.nix b/nix/py3.nix deleted file mode 100644 index 34ede49dd..000000000 --- a/nix/py3.nix +++ /dev/null @@ -1,7 +0,0 @@ -# This is the main entrypoint for the Tahoe-LAFS derivation. -{ pkgs ? import { } }: -# Add our Python packages to nixpkgs to simplify the expression for the -# Tahoe-LAFS derivation. -let pkgs' = pkgs.extend (import ./overlays.nix); -# Evaluate the expression for our Tahoe-LAFS derivation. -in pkgs'.python39.pkgs.callPackage ./tahoe-lafs.nix { } diff --git a/nix/pyutil.nix b/nix/pyutil.nix deleted file mode 100644 index 6852c2acc..000000000 --- a/nix/pyutil.nix +++ /dev/null @@ -1,48 +0,0 @@ -{ stdenv -, buildPythonPackage -, fetchPypi -, setuptoolsDarcs -, setuptoolsTrial -, simplejson -, twisted -, isPyPy -}: - -buildPythonPackage rec { - pname = "pyutil"; - version = "3.3.0"; - - src = fetchPypi { - inherit pname version; - sha256 = "8c4d4bf668c559186389bb9bce99e4b1b871c09ba252a756ccaacd2b8f401848"; - }; - - buildInputs = [ setuptoolsDarcs setuptoolsTrial ] ++ (if doCheck then [ simplejson ] else []); - propagatedBuildInputs = [ twisted ]; - - # Tests fail because they try to write new code into the twisted - # package, apparently some kind of plugin. - doCheck = false; - - prePatch = stdenv.lib.optionalString isPyPy '' - grep -rl 'utf-8-with-signature-unix' ./ | xargs sed -i -e "s|utf-8-with-signature-unix|utf-8|g" - ''; - - meta = with stdenv.lib; { - description = "Pyutil, a collection of mature utilities for Python programmers"; - - longDescription = '' - These are a few data structures, classes and functions which - we've needed over many years of Python programming and which - seem to be of general use to other Python programmers. Many of - the modules that have existed in pyutil over the years have - subsequently been obsoleted by new features added to the - Python language or its standard library, thus showing that - we're not alone in wanting tools like these. 
- ''; - - homepage = "http://allmydata.org/trac/pyutil"; - license = licenses.gpl2Plus; - }; - -} \ No newline at end of file diff --git a/nix/sources.json b/nix/sources.json new file mode 100644 index 000000000..950151416 --- /dev/null +++ b/nix/sources.json @@ -0,0 +1,62 @@ +{ + "mach-nix": { + "branch": "master", + "description": "Create highly reproducible python environments", + "homepage": "", + "owner": "davhau", + "repo": "mach-nix", + "rev": "bdc97ba6b2ecd045a467b008cff4ae337b6a7a6b", + "sha256": "12b3jc0g0ak6s93g3ifvdpwxbyqx276k1kl66bpwz8a67qjbcbwf", + "type": "tarball", + "url": "https://github.com/davhau/mach-nix/archive/bdc97ba6b2ecd045a467b008cff4ae337b6a7a6b.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "niv": { + "branch": "master", + "description": "Easy dependency management for Nix projects", + "homepage": "https://github.com/nmattia/niv", + "owner": "nmattia", + "repo": "niv", + "rev": "5830a4dd348d77e39a0f3c4c762ff2663b602d4c", + "sha256": "1d3lsrqvci4qz2hwjrcnd8h5vfkg8aypq3sjd4g3izbc8frwz5sm", + "type": "tarball", + "url": "https://github.com/nmattia/niv/archive/5830a4dd348d77e39a0f3c4c762ff2663b602d4c.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "nixpkgs-21.05": { + "branch": "nixos-21.05", + "description": "Nix Packages collection", + "homepage": "", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "0fd9ee1aa36ce865ad273f4f07fdc093adeb5c00", + "sha256": "1mr2qgv5r2nmf6s3gqpcjj76zpsca6r61grzmqngwm0xlh958smx", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/0fd9ee1aa36ce865ad273f4f07fdc093adeb5c00.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "nixpkgs-21.11": { + "branch": "nixos-21.11", + "description": "Nix Packages collection", + "homepage": "", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "838eefb4f93f2306d4614aafb9b2375f315d917f", + "sha256": "1bm8cmh1wx4h8b4fhbs75hjci3gcrpi7k1m1pmiy3nc0gjim9vkg", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/838eefb4f93f2306d4614aafb9b2375f315d917f.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "pypi-deps-db": { + "branch": "master", + "description": "Probably the most complete python dependency database", + "homepage": "", + "owner": "DavHau", + "repo": "pypi-deps-db", + "rev": "5fe7d2d1c85cd86d64f4f079eef3f1ff5653bcd6", + "sha256": "0pc6mj7rzvmhh303rvj5wf4hrksm4h2rf4fsvqs0ljjdmgxrqm3f", + "type": "tarball", + "url": "https://github.com/DavHau/pypi-deps-db/archive/5fe7d2d1c85cd86d64f4f079eef3f1ff5653bcd6.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + } +} diff --git a/nix/sources.nix b/nix/sources.nix new file mode 100644 index 000000000..1938409dd --- /dev/null +++ b/nix/sources.nix @@ -0,0 +1,174 @@ +# This file has been generated by Niv. + +let + + # + # The fetchers. fetch_ fetches specs of type . + # + + fetch_file = pkgs: name: spec: + let + name' = sanitizeName name + "-src"; + in + if spec.builtin or true then + builtins_fetchurl { inherit (spec) url sha256; name = name'; } + else + pkgs.fetchurl { inherit (spec) url sha256; name = name'; }; + + fetch_tarball = pkgs: name: spec: + let + name' = sanitizeName name + "-src"; + in + if spec.builtin or true then + builtins_fetchTarball { name = name'; inherit (spec) url sha256; } + else + pkgs.fetchzip { name = name'; inherit (spec) url sha256; }; + + fetch_git = name: spec: + let + ref = + if spec ? ref then spec.ref else + if spec ? branch then "refs/heads/${spec.branch}" else + if spec ? 
tag then "refs/tags/${spec.tag}" else + abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!"; + in + builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; }; + + fetch_local = spec: spec.path; + + fetch_builtin-tarball = name: throw + ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`. + $ niv modify ${name} -a type=tarball -a builtin=true''; + + fetch_builtin-url = name: throw + ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`. + $ niv modify ${name} -a type=file -a builtin=true''; + + # + # Various helpers + # + + # https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695 + sanitizeName = name: + ( + concatMapStrings (s: if builtins.isList s then "-" else s) + ( + builtins.split "[^[:alnum:]+._?=-]+" + ((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name) + ) + ); + + # The set of packages used when specs are fetched using non-builtins. + mkPkgs = sources: system: + let + sourcesNixpkgs = + import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; }; + hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath; + hasThisAsNixpkgsPath = == ./.; + in + if builtins.hasAttr "nixpkgs" sources + then sourcesNixpkgs + else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then + import {} + else + abort + '' + Please specify either (through -I or NIX_PATH=nixpkgs=...) or + add a package called "nixpkgs" to your sources.json. + ''; + + # The actual fetching function. + fetch = pkgs: name: spec: + + if ! builtins.hasAttr "type" spec then + abort "ERROR: niv spec ${name} does not have a 'type' attribute" + else if spec.type == "file" then fetch_file pkgs name spec + else if spec.type == "tarball" then fetch_tarball pkgs name spec + else if spec.type == "git" then fetch_git name spec + else if spec.type == "local" then fetch_local spec + else if spec.type == "builtin-tarball" then fetch_builtin-tarball name + else if spec.type == "builtin-url" then fetch_builtin-url name + else + abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}"; + + # If the environment variable NIV_OVERRIDE_${name} is set, then use + # the path directly as opposed to the fetched source. + replace = name: drv: + let + saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name; + ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}"; + in + if ersatz == "" then drv else + # this turns the string into an actual Nix path (for both absolute and + # relative paths) + if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. 
+ builtins.getEnv "PWD" + "/${ersatz}"; + + # Ports of functions for older nix versions + + # a Nix version of mapAttrs if the built-in doesn't exist + mapAttrs = builtins.mapAttrs or ( + f: set: with builtins; + listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)) + ); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295 + range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257 + stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1)); + + # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269 + stringAsChars = f: s: concatStrings (map f (stringToCharacters s)); + concatMapStrings = f: list: concatStrings (map f list); + concatStrings = builtins.concatStringsSep ""; + + # https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331 + optionalAttrs = cond: as: if cond then as else {}; + + # fetchTarball version that is compatible between all the versions of Nix + builtins_fetchTarball = { url, name ? null, sha256 }@attrs: + let + inherit (builtins) lessThan nixVersion fetchTarball; + in + if lessThan nixVersion "1.12" then + fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) + else + fetchTarball attrs; + + # fetchurl version that is compatible between all the versions of Nix + builtins_fetchurl = { url, name ? null, sha256 }@attrs: + let + inherit (builtins) lessThan nixVersion fetchurl; + in + if lessThan nixVersion "1.12" then + fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; })) + else + fetchurl attrs; + + # Create the final "sources" from the config + mkSources = config: + mapAttrs ( + name: spec: + if builtins.hasAttr "outPath" spec + then abort + "The values in sources.json should not have an 'outPath' attribute" + else + spec // { outPath = replace name (fetch config.pkgs name spec); } + ) config.sources; + + # The "config" used by the fetchers + mkConfig = + { sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null + , sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile) + , system ? builtins.currentSystem + , pkgs ? mkPkgs sources system + }: rec { + # The sources, i.e. the attribute set of spec name to spec + inherit sources; + + # The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers + inherit pkgs; + }; + +in +mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); } diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix deleted file mode 100644 index c7db6c583..000000000 --- a/nix/tahoe-lafs.nix +++ /dev/null @@ -1,125 +0,0 @@ -{ fetchFromGitHub, lib -, git, python -, twisted, foolscap, zfec -, setuptools, setuptoolsTrial, pyasn1, zope_interface -, service-identity, pyyaml, magic-wormhole, treq, appdirs -, beautifulsoup4, eliot, autobahn, cryptography, netifaces -, html5lib, pyutil, distro, configparser -}: -python.pkgs.buildPythonPackage rec { - # Most of the time this is not exactly the release version (eg 1.15.1). - # Give it a `post` component to make it look newer than the release version - # and we'll bump this up at the time of each release. - # - # It's difficult to read the version from Git the way the Python code does - # for two reasons. 
First, doing so involves populating the Nix expression - # with values from the source. Nix calls this "import from derivation" or - # "IFD" (). This is - # discouraged in most cases - including this one, I think. Second, the - # Python code reads the contents of `.git` to determine its version. `.git` - # is not a reproducable artifact (in the sense of "reproducable builds") so - # it is excluded from the source tree by default. When it is included, the - # package tends to be frequently spuriously rebuilt. - version = "1.15.1.post1"; - name = "tahoe-lafs-${version}"; - src = lib.cleanSourceWith { - src = ../.; - filter = name: type: - let - basename = baseNameOf name; - - split = lib.splitString "."; - join = builtins.concatStringsSep "."; - ext = join (builtins.tail (split basename)); - - # Build up a bunch of knowledge about what kind of file this is. - isTox = type == "directory" && basename == ".tox"; - isTrialTemp = type == "directory" && basename == "_trial_temp"; - isVersion = basename == "_version.py"; - isBytecode = ext == "pyc" || ext == "pyo"; - isBackup = lib.hasSuffix "~" basename; - isTemporary = lib.hasPrefix "#" basename && lib.hasSuffix "#" basename; - isSymlink = type == "symlink"; - isGit = type == "directory" && basename == ".git"; - in - # Exclude all these things - ! (isTox - || isTrialTemp - || isVersion - || isBytecode - || isBackup - || isTemporary - || isSymlink - || isGit - ); - }; - - postPatch = '' - # Chroots don't have /etc/hosts and /etc/resolv.conf, so work around - # that. - for i in $(find src/allmydata/test -type f) - do - sed -i "$i" -e"s/localhost/127.0.0.1/g" - done - - # Some tests are flaky or fail to skip when dependencies are missing. - # This list is over-zealous because it's more work to disable individual - # tests with in a module. - - # Many of these tests don't properly skip when i2p or tor dependencies are - # not supplied (and we are not supplying them). - rm src/allmydata/test/test_i2p_provider.py - rm src/allmydata/test/test_connections.py - rm src/allmydata/test/cli/test_create.py - - # Generate _version.py ourselves since we can't rely on the Python code - # extracting the information from the .git directory we excluded. - cat > src/allmydata/_version.py < /dev/null - ''; - - checkPhase = '' - ${python.interpreter} -m unittest discover -s twisted/test - ''; - # Tests require network - doCheck = false; - - meta = with stdenv.lib; { - homepage = https://twistedmatrix.com/; - description = "Twisted, an event-driven networking engine written in Python"; - longDescription = '' - Twisted is an event-driven networking engine written in Python - and licensed under the MIT license. - ''; - license = licenses.mit; - maintainers = [ ]; - }; -} diff --git a/pyinstaller.spec b/pyinstaller.spec index 875629c13..eece50757 100644 --- a/pyinstaller.spec +++ b/pyinstaller.spec @@ -11,7 +11,10 @@ import struct import sys -if not hasattr(sys, 'real_prefix'): +try: + import allmydata + del allmydata +except ImportError: sys.exit("Please run inside a virtualenv with Tahoe-LAFS installed.") diff --git a/relnotes.txt b/relnotes.txt index 4afbd6cc5..dd7cc9429 100644 --- a/relnotes.txt +++ b/relnotes.txt @@ -1,6 +1,6 @@ -ANNOUNCING Tahoe, the Least-Authority File Store, v1.15.1 +ANNOUNCING Tahoe, the Least-Authority File Store, v1.18.0 -The Tahoe-LAFS team is pleased to announce version 1.15.1 of +The Tahoe-LAFS team is pleased to announce version 1.18.0 of Tahoe-LAFS, an extremely reliable decentralized storage system. 
Get it with "pip install tahoe-lafs", or download a tarball here: @@ -15,17 +15,14 @@ unique security and fault-tolerance properties: https://tahoe-lafs.readthedocs.org/en/latest/about.html -The previous stable release of Tahoe-LAFS was v1.15.0, released on -January 19, 2021. +The previous stable release of Tahoe-LAFS was v1.17.1, released on +January 7, 2022. -In this release: PyPI does not accept uploads of packages that use -PEP-508 version specifiers. +This release drops support for Python 2 and for Python 3.6 and earlier. +twistd.pid is no longer used (in favour of one with pid + process creation time). +A collection of minor bugs and issues were also fixed. -Note that Python3 porting is underway but not yet complete in this -release. Developers may notice python3 as new targets for certain -tools. - -Please see ``NEWS.rst`` for a more complete list of changes. +Please see ``NEWS.rst`` [1] for a complete list of changes. WHAT IS IT GOOD FOR? @@ -64,12 +61,12 @@ to v1.0 (which was released March 25, 2008). Clients from this release can read files and directories produced by clients of all versions since v1.0. -Network connections are limited by the Introducer protocol in -use. If the Introducer is running v1.10 or v1.11, then servers -from this release (v1.12) can serve clients of all versions -back to v1.0 . If it is running v1.12, then they can only -serve clients back to v1.10. Clients from this release can use -servers back to v1.10, but not older servers. +Network connections are limited by the Introducer protocol in use. If +the Introducer is running v1.10 or v1.11, then servers from this +release can serve clients of all versions back to v1.0 . If it is +running v1.12 or higher, then they can only serve clients back to +v1.10. Clients from this release can use servers back to v1.10, but +not older servers. Except for the new optional MDMF format, we have not made any intentional compatibility changes. However we do not yet have @@ -77,7 +74,7 @@ the test infrastructure to continuously verify that all new versions are interoperable with previous versions. We intend to build such an infrastructure in the future. -This is the twenty-first release in the version 1 series. This +This is the twenty-second release in the version 1 series. This series of Tahoe-LAFS will be actively supported and maintained for the foreseeable future, and future versions of Tahoe-LAFS will retain the ability to read and write files compatible @@ -137,24 +134,23 @@ Of Fame" [13]. ACKNOWLEDGEMENTS -This is the eighteenth release of Tahoe-LAFS to be created -solely as a labor of love by volunteers. Thank you very much -to the team of "hackers in the public interest" who make -Tahoe-LAFS possible. +This is the twentieth release of Tahoe-LAFS to be created solely as a +labor of love by volunteers. Thank you very much to the team of +"hackers in the public interest" who make Tahoe-LAFS possible. 
meejah on behalf of the Tahoe-LAFS team -March 23, 2021 +October 1, 2022 Planet Earth -[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.15.1/NEWS.rst +[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/NEWS.rst [2] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst [3] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects -[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.15.1/COPYING.GPL -[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.15.1/COPYING.TGPPL.rst -[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.15.1/INSTALL.html +[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/COPYING.GPL +[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/COPYING.TGPPL.rst +[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.18.0/INSTALL.html [7] https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev [8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap [9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS diff --git a/setup.cfg b/setup.cfg index 1277afd35..31ab7591c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,8 +20,8 @@ package_dir= =src include_package_data = True -# We support Python 2.7 and many newer versions of Python 3. -python_requires = >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +# We support Python 3.7 or later. 3.11 is not supported yet. +python_requires = >=3.7, <3.11 install_requires = # we don't need much out of setuptools but the version checking stuff @@ -47,8 +47,7 @@ install_requires = # * foolscap >= 0.12.6 has an i2p.sam_endpoint() that takes kwargs # * foolscap 0.13.2 drops i2p support completely # * foolscap >= 21.7 is necessary for Python 3 with i2p support. - foolscap == 0.13.1 ; python_version < '3.0' - foolscap >= 21.7.0 ; python_version > '3.0' + foolscap >= 21.7.0 # * cryptography 2.6 introduced some ed25519 APIs we rely on. Note that # Twisted[conch] also depends on cryptography and Twisted[tls] @@ -98,16 +97,12 @@ install_requires = # for 'tahoe invite' and 'tahoe join' magic-wormhole >= 0.10.2 - # Eliot is contemplating dropping Python 2 support. Stick to a version we - # know works on Python 2.7. - eliot ~= 1.7 ; python_version < '3.0' - # On Python 3, we want a new enough version to support custom JSON encoders. - eliot >= 1.13.0 ; python_version > '3.0' + # A new enough version to support custom JSON encoders. + eliot >= 1.13.0 # Pyrsistent 0.17.0 (which we use by way of Eliot) has dropped # Python 2 entirely; stick to the version known to work for us. - pyrsistent < 0.17.0 ; python_version < '3.0' - pyrsistent ; python_version > '3.0' + pyrsistent # A great way to define types of values. attrs >= 18.2.0 @@ -127,17 +122,27 @@ install_requires = # Linux distribution detection: distro >= 1.4.0 - # Backported configparser for Python 2: - configparser ; python_version < '3.0' - - # For the RangeMap datastructure. - collections-extended + # For the RangeMap datastructure. Need 2.0.2 at least for bugfixes. + collections-extended >= 2.0.2 # Duplicate the Twisted pywin32 dependency here. See # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2392 for some # discussion. 
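
The setup.cfg hunk above narrows python_requires to ">=3.7, <3.11" and adds the dependencies needed for the HTTP storage protocol (klein, werkzeug, treq, cbor2, pycddl). A small, purely illustrative runtime guard mirroring that interpreter constraint might look like this; it is not part of the package itself.

    import sys

    # Mirrors the new python_requires bound (>=3.7, <3.11).
    def interpreter_is_supported() -> bool:
        return (3, 7) <= sys.version_info[:2] < (3, 11)

    if not interpreter_is_supported():
        sys.exit("Tahoe-LAFS 1.18.0 needs Python 3.7 through 3.10.")

The upper bound reflects the comment in setup.cfg that Python 3.11 is not supported yet.
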
pywin32 != 226 ; sys_platform=="win32" + # HTTP server and client + klein + + # 2.2.0 has a bug: https://github.com/pallets/werkzeug/issues/2465 + werkzeug != 2.2.0 + treq + cbor2 + pycddl >= 0.2 + + # for pid-file support + psutil + filelock + [options.packages.find] # inform the setuptools source discovery logic to start in this directory where = src @@ -152,9 +157,17 @@ allmydata.web = static/css/*.css [options.extras_require] +build = + dulwich + gpg + test = flake8 - # Pin a specific pyflakes so we don't have different folks + # On Python 3.7, importlib_metadata v5 breaks flake8. + # https://github.com/python/importlib_metadata/issues/407 + importlib_metadata < 5; python_version < 3.8 + +# Pin a specific pyflakes so we don't have different folks # disagreeing on what is or is not a lint issue. We can bump # this version from time to time, but we will do it # intentionally. @@ -164,12 +177,7 @@ test = tox pytest pytest-twisted - # XXX: decorator isn't a direct dependency, but pytest-twisted - # depends on decorator, and decorator 5.x isn't compatible with - # Python 2.7. - decorator < 5 hypothesis >= 3.6.1 - treq towncrier testtools fixtures @@ -177,7 +185,9 @@ test = html5lib junitxml tenacity - paramiko + # Pin old version until + # https://github.com/paramiko/paramiko/issues/1961 is fixed. + paramiko < 2.9 pytest-timeout # Does our OpenMetrics endpoint adhere to the spec: prometheus-client == 0.11.0 diff --git a/src/allmydata/__init__.py b/src/allmydata/__init__.py index 42611810b..333394fc5 100644 --- a/src/allmydata/__init__.py +++ b/src/allmydata/__init__.py @@ -16,28 +16,36 @@ if PY2: __all__ = [ "__version__", + "full_version", + "branch", "__appname__", "__full_version__", ] -def _discover_version(): - try: - from allmydata._version import version - except ImportError: - # Perhaps we're running from a git checkout where the _version.py file - # hasn't been generated yet. Try to discover the version using git - # information instead. - try: - import setuptools_scm - return setuptools_scm.get_version() - except Exception: - return "unknown" - else: - return version +__version__ = "unknown" +try: + # type ignored as it fails in CI + # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) + from allmydata._version import __version__ # type: ignore +except ImportError: + # We're running in a tree that hasn't run update_version, and didn't + # come with a _version.py, so we don't know what our version is. + # This should not happen very often. + pass + +full_version = "unknown" +branch = "unknown" +try: + # type ignored as it fails in CI + # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) + from allmydata._version import full_version, branch # type: ignore +except ImportError: + # We're running in a tree that hasn't run update_version, and didn't + # come with a _version.py, so we don't know what our full version or + # branch is. This should not happen very often. + pass __appname__ = "tahoe-lafs" -__version__ = _discover_version() -del _discover_version # __full_version__ is the one that you ought to use when identifying yourself # in the "application" part of the Tahoe versioning scheme: diff --git a/src/allmydata/client.py b/src/allmydata/client.py index aabae9065..1a158a1aa 100644 --- a/src/allmydata/client.py +++ b/src/allmydata/client.py @@ -1,17 +1,9 @@ """ Ported to Python 3. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from future.utils import PY2 -if PY2: - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, max, min # noqa: F401 - # Don't use future str to prevent leaking future's newbytes into foolscap, which they break. - from past.builtins import unicode as str +from __future__ import annotations +from typing import Optional import os, stat, time, weakref from base64 import urlsafe_b64encode from functools import partial @@ -36,11 +28,10 @@ from twisted.python.filepath import FilePath import allmydata from allmydata.crypto import rsa, ed25519 from allmydata.crypto.util import remove_prefix -from allmydata.storage.server import StorageServer +from allmydata.storage.server import StorageServer, FoolscapStorageServer from allmydata import storage_client from allmydata.immutable.upload import Uploader from allmydata.immutable.offloaded import Helper -from allmydata.control import ControlServer from allmydata.introducer.client import IntroducerClient from allmydata.util import ( hashutil, base32, pollmixin, log, idlib, @@ -113,6 +104,7 @@ _client_config = configutil.ValidConfiguration( "reserved_space", "storage_dir", "plugins", + "force_foolscap", ), "sftpd": ( "accounts.file", @@ -169,29 +161,12 @@ class SecretHolder(object): class KeyGenerator(object): """I create RSA keys for mutable files. Each call to generate() returns a - single keypair. The keysize is specified first by the keysize= argument - to generate(), then with a default set by set_default_keysize(), then - with a built-in default of 2048 bits.""" - def __init__(self): - self.default_keysize = 2048 + single keypair.""" - def set_default_keysize(self, keysize): - """Call this to override the size of the RSA keys created for new - mutable files which don't otherwise specify a size. This will affect - all subsequent calls to generate() without a keysize= argument. The - default size is 2048 bits. Test cases should call this method once - during setup, to cause me to create smaller keys, so the unit tests - run faster.""" - self.default_keysize = keysize - - def generate(self, keysize=None): + def generate(self): """I return a Deferred that fires with a (verifyingkey, signingkey) - pair. I accept a keysize in bits (2048 bit keys are standard, smaller - keys are used for testing). If you do not provide a keysize, I will - use my default, which is set by a call to set_default_keysize(). If - set_default_keysize() has never been called, I will create 2048 bit - keys.""" - keysize = keysize or self.default_keysize + pair. 
The returned key will be 2048 bit""" + keysize = 2048 # RSA key generation for a 2048 bit key takes between 0.8 and 3.2 # secs signer, verifier = rsa.create_signing_keypair(keysize) @@ -283,7 +258,6 @@ def create_client_from_config(config, _client_factory=None, _introducer_factory= config, tub_options, default_connection_handlers, foolscap_connection_handlers, i2p_provider, tor_provider, ) - control_tub = node.create_control_tub() introducer_clients = create_introducer_clients(config, main_tub, _introducer_factory) storage_broker = create_storage_farm_broker( @@ -294,7 +268,6 @@ def create_client_from_config(config, _client_factory=None, _introducer_factory= client = _client_factory( config, main_tub, - control_tub, i2p_provider, tor_provider, introducer_clients, @@ -611,6 +584,10 @@ def anonymous_storage_enabled(config): @implementer(IStatsProducer) class _Client(node.Node, pollmixin.PollMixin): + """ + This class should be refactored; see + https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3931 + """ STOREDIR = 'storage' NODETYPE = "client" @@ -631,12 +608,12 @@ class _Client(node.Node, pollmixin.PollMixin): "max_segment_size": DEFAULT_MAX_SEGMENT_SIZE, } - def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider, introducer_clients, + def __init__(self, config, main_tub, i2p_provider, tor_provider, introducer_clients, storage_farm_broker): """ Use :func:`allmydata.client.create_client` to instantiate one of these. """ - node.Node.__init__(self, config, main_tub, control_tub, i2p_provider, tor_provider) + node.Node.__init__(self, config, main_tub, i2p_provider, tor_provider) self.started_timestamp = time.time() self.logSource = "Client" @@ -648,7 +625,6 @@ class _Client(node.Node, pollmixin.PollMixin): self.init_stats_provider() self.init_secrets() self.init_node_key() - self.init_control() self._key_generator = KeyGenerator() key_gen_furl = config.get_config("client", "key_generator.furl", None) if key_gen_furl: @@ -679,6 +655,14 @@ class _Client(node.Node, pollmixin.PollMixin): if webport: self.init_web(webport) # strports string + # TODO this may be the wrong location for now? but as temporary measure + # it allows us to get NURLs for testing in test_istorageserver.py. This + # will eventually get fixed one way or another in + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3901. See also + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3931 for the bigger + # picture issue. 
+ self.storage_nurls : Optional[set] = None + def init_stats_provider(self): self.stats_provider = StatsProvider(self) self.stats_provider.setServiceParent(self) @@ -838,7 +822,12 @@ class _Client(node.Node, pollmixin.PollMixin): if anonymous_storage_enabled(self.config): furl_file = self.config.get_private_path("storage.furl").encode(get_filesystem_encoding()) - furl = self.tub.registerReference(ss, furlFile=furl_file) + furl = self.tub.registerReference(FoolscapStorageServer(ss), furlFile=furl_file) + (_, _, swissnum) = decode_furl(furl) + if hasattr(self.tub.negotiationClass, "add_storage_server"): + nurls = self.tub.negotiationClass.add_storage_server(ss, swissnum.encode("ascii")) + self.storage_nurls = nurls + announcement[storage_client.ANONYMOUS_STORAGE_NURLS] = [n.to_text() for n in nurls] announcement["anonymous-storage-FURL"] = furl enabled_storage_servers = self._enable_storage_servers( @@ -985,12 +974,6 @@ class _Client(node.Node, pollmixin.PollMixin): def get_history(self): return self.history - def init_control(self): - c = ControlServer() - c.setServiceParent(self) - control_url = self.control_tub.registerReference(c) - self.config.write_private_config("control.furl", control_url + "\n") - def init_helper(self): self.helper = Helper(self.config.get_config_path("helper"), self.storage_broker, self._secret_holder, @@ -1003,9 +986,6 @@ class _Client(node.Node, pollmixin.PollMixin): helper_furlfile = self.config.get_private_path("helper.furl").encode(get_filesystem_encoding()) self.tub.registerReference(self.helper, furlFile=helper_furlfile) - def set_default_mutable_keysize(self, keysize): - self._key_generator.set_default_keysize(keysize) - def _get_tempdir(self): """ Determine the path to the directory where temporary files for this node @@ -1106,8 +1086,8 @@ class _Client(node.Node, pollmixin.PollMixin): def create_immutable_dirnode(self, children, convergence=None): return self.nodemaker.create_immutable_directory(children, convergence) - def create_mutable_file(self, contents=None, keysize=None, version=None): - return self.nodemaker.create_mutable_file(contents, keysize, + def create_mutable_file(self, contents=None, version=None): + return self.nodemaker.create_mutable_file(contents, version=version) def upload(self, uploadable, reactor=None): diff --git a/src/allmydata/control.py b/src/allmydata/control.py deleted file mode 100644 index 7efa174ab..000000000 --- a/src/allmydata/control.py +++ /dev/null @@ -1,273 +0,0 @@ -"""Ported to Python 3. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from future.utils import PY2 -if PY2: - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 - -import os, time, tempfile -from zope.interface import implementer -from twisted.application import service -from twisted.internet import defer -from twisted.internet.interfaces import IConsumer -from foolscap.api import Referenceable -from allmydata.interfaces import RIControlClient, IFileNode -from allmydata.util import fileutil, mathutil -from allmydata.immutable import upload -from allmydata.mutable.publish import MutableData -from twisted.python import log - -def get_memory_usage(): - # this is obviously linux-specific - stat_names = (b"VmPeak", - b"VmSize", - #b"VmHWM", - b"VmData") - stats = {} - try: - with open("/proc/self/status", "rb") as f: - for line in f: - name, right = line.split(b":",2) - if name in stat_names: - assert right.endswith(b" kB\n") - right = right[:-4] - stats[name] = int(right) * 1024 - except: - # Probably not on (a compatible version of) Linux - stats['VmSize'] = 0 - stats['VmPeak'] = 0 - return stats - -def log_memory_usage(where=""): - stats = get_memory_usage() - log.msg("VmSize: %9d VmPeak: %9d %s" % (stats[b"VmSize"], - stats[b"VmPeak"], - where)) - -@implementer(IConsumer) -class FileWritingConsumer(object): - def __init__(self, filename): - self.done = False - self.f = open(filename, "wb") - def registerProducer(self, p, streaming): - if streaming: - p.resumeProducing() - else: - while not self.done: - p.resumeProducing() - def write(self, data): - self.f.write(data) - def unregisterProducer(self): - self.done = True - self.f.close() - -@implementer(RIControlClient) -class ControlServer(Referenceable, service.Service): - - def remote_wait_for_client_connections(self, num_clients): - return self.parent.debug_wait_for_client_connections(num_clients) - - def remote_upload_random_data_from_file(self, size, convergence): - tempdir = tempfile.mkdtemp() - filename = os.path.join(tempdir, "data") - f = open(filename, "wb") - block = b"a" * 8192 - while size > 0: - l = min(size, 8192) - f.write(block[:l]) - size -= l - f.close() - uploader = self.parent.getServiceNamed("uploader") - u = upload.FileName(filename, convergence=convergence) - # XXX should pass reactor arg - d = uploader.upload(u) - d.addCallback(lambda results: results.get_uri()) - def _done(uri): - os.remove(filename) - os.rmdir(tempdir) - return uri - d.addCallback(_done) - return d - - def remote_download_to_tempfile_and_delete(self, uri): - tempdir = tempfile.mkdtemp() - filename = os.path.join(tempdir, "data") - filenode = self.parent.create_node_from_uri(uri, name=filename) - if not IFileNode.providedBy(filenode): - raise AssertionError("The URI does not reference a file.") - c = FileWritingConsumer(filename) - d = filenode.read(c) - def _done(res): - os.remove(filename) - os.rmdir(tempdir) - return None - d.addCallback(_done) - return d - - def remote_speed_test(self, count, size, mutable): - assert size > 8 - log.msg("speed_test: count=%d, size=%d, mutable=%s" % (count, size, - mutable)) - st = SpeedTest(self.parent, count, size, mutable) - return st.run() - - def remote_get_memory_usage(self): - return get_memory_usage() - - def remote_measure_peer_response_time(self): - # I'd like to average together several pings, but I 
don't want this - # phase to take more than 10 seconds. Expect worst-case latency to be - # 300ms. - results = {} - sb = self.parent.get_storage_broker() - everyone = sb.get_connected_servers() - num_pings = int(mathutil.div_ceil(10, (len(everyone) * 0.3))) - everyone = list(everyone) * num_pings - d = self._do_one_ping(None, everyone, results) - return d - def _do_one_ping(self, res, everyone_left, results): - if not everyone_left: - return results - server = everyone_left.pop(0) - server_name = server.get_longname() - storage_server = server.get_storage_server() - start = time.time() - d = storage_server.get_buckets(b"\x00" * 16) - def _done(ignored): - stop = time.time() - elapsed = stop - start - if server_name in results: - results[server_name].append(elapsed) - else: - results[server_name] = [elapsed] - d.addCallback(_done) - d.addCallback(self._do_one_ping, everyone_left, results) - def _average(res): - averaged = {} - for server_name,times in results.items(): - averaged[server_name] = sum(times) / len(times) - return averaged - d.addCallback(_average) - return d - -class SpeedTest(object): - def __init__(self, parent, count, size, mutable): - self.parent = parent - self.count = count - self.size = size - self.mutable_mode = mutable - self.uris = {} - self.basedir = self.parent.config.get_config_path("_speed_test_data") - - def run(self): - self.create_data() - d = self.do_upload() - d.addCallback(lambda res: self.do_download()) - d.addBoth(self.do_cleanup) - d.addCallback(lambda res: (self.upload_time, self.download_time)) - return d - - def create_data(self): - fileutil.make_dirs(self.basedir) - for i in range(self.count): - s = self.size - fn = os.path.join(self.basedir, str(i)) - if os.path.exists(fn): - os.unlink(fn) - f = open(fn, "wb") - f.write(os.urandom(8)) - s -= 8 - while s > 0: - chunk = min(s, 4096) - f.write(b"\x00" * chunk) - s -= chunk - f.close() - - def do_upload(self): - d = defer.succeed(None) - def _create_slot(res): - d1 = self.parent.create_mutable_file(b"") - def _created(n): - self._n = n - d1.addCallback(_created) - return d1 - if self.mutable_mode == "upload": - d.addCallback(_create_slot) - def _start(res): - self._start = time.time() - d.addCallback(_start) - - def _record_uri(uri, i): - self.uris[i] = uri - def _upload_one_file(ignored, i): - if i >= self.count: - return - fn = os.path.join(self.basedir, str(i)) - if self.mutable_mode == "create": - data = open(fn,"rb").read() - d1 = self.parent.create_mutable_file(data) - d1.addCallback(lambda n: n.get_uri()) - elif self.mutable_mode == "upload": - data = open(fn,"rb").read() - d1 = self._n.overwrite(MutableData(data)) - d1.addCallback(lambda res: self._n.get_uri()) - else: - up = upload.FileName(fn, convergence=None) - d1 = self.parent.upload(up) - d1.addCallback(lambda results: results.get_uri()) - d1.addCallback(_record_uri, i) - d1.addCallback(_upload_one_file, i+1) - return d1 - d.addCallback(_upload_one_file, 0) - def _upload_done(ignored): - stop = time.time() - self.upload_time = stop - self._start - d.addCallback(_upload_done) - return d - - def do_download(self): - start = time.time() - d = defer.succeed(None) - def _download_one_file(ignored, i): - if i >= self.count: - return - n = self.parent.create_node_from_uri(self.uris[i]) - if not IFileNode.providedBy(n): - raise AssertionError("The URI does not reference a file.") - if n.is_mutable(): - d1 = n.download_best_version() - else: - d1 = n.read(DiscardingConsumer()) - d1.addCallback(_download_one_file, i+1) - return d1 - 
d.addCallback(_download_one_file, 0) - def _download_done(ignored): - stop = time.time() - self.download_time = stop - start - d.addCallback(_download_done) - return d - - def do_cleanup(self, res): - for i in range(self.count): - fn = os.path.join(self.basedir, str(i)) - os.unlink(fn) - return res - -@implementer(IConsumer) -class DiscardingConsumer(object): - def __init__(self): - self.done = False - def registerProducer(self, p, streaming): - if streaming: - p.resumeProducing() - else: - while not self.done: - p.resumeProducing() - def write(self, data): - pass - def unregisterProducer(self): - self.done = True diff --git a/src/allmydata/crypto/rsa.py b/src/allmydata/crypto/rsa.py index b5d15ad4a..95cf01413 100644 --- a/src/allmydata/crypto/rsa.py +++ b/src/allmydata/crypto/rsa.py @@ -77,6 +77,14 @@ def create_signing_keypair_from_string(private_key_der): password=None, backend=default_backend(), ) + if not isinstance(priv_key, rsa.RSAPrivateKey): + raise ValueError( + "Private Key did not decode to an RSA key" + ) + if priv_key.key_size != 2048: + raise ValueError( + "Private Key must be 2048 bits" + ) return priv_key, priv_key.public_key() diff --git a/src/allmydata/frontends/auth.py b/src/allmydata/frontends/auth.py index b61062334..b6f9c2b7e 100644 --- a/src/allmydata/frontends/auth.py +++ b/src/allmydata/frontends/auth.py @@ -12,7 +12,7 @@ if PY2: from zope.interface import implementer from twisted.internet import defer -from twisted.cred import error, checkers, credentials +from twisted.cred import checkers, credentials from twisted.conch.ssh import keys from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB @@ -32,65 +32,93 @@ class FTPAvatarID(object): @implementer(checkers.ICredentialsChecker) class AccountFileChecker(object): - credentialInterfaces = (credentials.IUsernamePassword, - credentials.IUsernameHashedPassword, - credentials.ISSHPrivateKey) + credentialInterfaces = (credentials.ISSHPrivateKey,) + def __init__(self, client, accountfile): self.client = client - self.passwords = BytesKeyDict() - pubkeys = BytesKeyDict() - self.rootcaps = BytesKeyDict() - with open(abspath_expanduser_unicode(accountfile), "rb") as f: - for line in f: - line = line.strip() - if line.startswith(b"#") or not line: - continue - name, passwd, rest = line.split(None, 2) - if passwd.startswith(b"ssh-"): - bits = rest.split() - keystring = b" ".join([passwd] + bits[:-1]) - key = keys.Key.fromString(keystring) - rootcap = bits[-1] - pubkeys[name] = [key] - else: - self.passwords[name] = passwd - rootcap = rest - self.rootcaps[name] = rootcap + path = abspath_expanduser_unicode(accountfile) + with open_account_file(path) as f: + self.rootcaps, pubkeys = load_account_file(f) self._pubkeychecker = SSHPublicKeyChecker(InMemorySSHKeyDB(pubkeys)) def _avatarId(self, username): return FTPAvatarID(username, self.rootcaps[username]) - def _cbPasswordMatch(self, matched, username): - if matched: - return self._avatarId(username) - raise error.UnauthorizedLogin - def requestAvatarId(self, creds): if credentials.ISSHPrivateKey.providedBy(creds): d = defer.maybeDeferred(self._pubkeychecker.requestAvatarId, creds) d.addCallback(self._avatarId) return d - elif credentials.IUsernameHashedPassword.providedBy(creds): - return self._checkPassword(creds) - elif credentials.IUsernamePassword.providedBy(creds): - return self._checkPassword(creds) - else: - raise NotImplementedError() + raise NotImplementedError() - def _checkPassword(self, creds): - """ - Determine whether the password in the given 
credentials matches the - password in the account file. +def open_account_file(path): + """ + Open and return the accounts file at the given path. + """ + return open(path, "rt", encoding="utf-8") - Returns a Deferred that fires with the username if the password matches - or with an UnauthorizedLogin failure otherwise. - """ - try: - correct = self.passwords[creds.username] - except KeyError: - return defer.fail(error.UnauthorizedLogin()) +def load_account_file(lines): + """ + Load credentials from an account file. - d = defer.maybeDeferred(creds.checkPassword, correct) - d.addCallback(self._cbPasswordMatch, creds.username) - return d + :param lines: An iterable of account lines to load. + + :return: See ``create_account_maps``. + """ + return create_account_maps( + parse_accounts( + content_lines( + lines, + ), + ), + ) + +def content_lines(lines): + """ + Drop empty and commented-out lines (``#``-prefixed) from an iterator of + lines. + + :param lines: An iterator of lines to process. + + :return: An iterator of lines including only those from ``lines`` that + include content intended to be loaded. + """ + for line in lines: + line = line.strip() + if line and not line.startswith("#"): + yield line + +def parse_accounts(lines): + """ + Parse account lines into their components (name, key, rootcap). + """ + for line in lines: + name, passwd, rest = line.split(None, 2) + if not passwd.startswith("ssh-"): + raise ValueError( + "Password-based authentication is not supported; " + "configure key-based authentication instead." + ) + + bits = rest.split() + keystring = " ".join([passwd] + bits[:-1]) + key = keys.Key.fromString(keystring) + rootcap = bits[-1] + yield (name, key, rootcap) + +def create_account_maps(accounts): + """ + Build mappings from account names to keys and rootcaps. + + :param accounts: An iterator if (name, key, rootcap) tuples. + + :return: A tuple of two dicts. The first maps account names to rootcaps. + The second maps account names to public keys. 
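
The rewritten account-file handling in src/allmydata/frontends/auth.py above accepts only SSH-key entries: parse_accounts() raises ValueError for any line whose second field does not start with "ssh-". A short usage sketch of that rejection path (the sample line is made up):

    from allmydata.frontends.auth import parse_accounts

    # An old password-style entry ("<name> <password> <rootcap>") now fails
    # loudly instead of being treated as a credential.
    try:
        list(parse_accounts(["bob s3kr1t URI:DIR2:rootcap-for-bob"]))
    except ValueError as err:
        print(err)  # only key-based authentication is supported

Valid entries keep the existing one-line format of an account name, an SSH public key, and the account's rootcap.
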
+ """ + rootcaps = BytesKeyDict() + pubkeys = BytesKeyDict() + for (name, key, rootcap) in accounts: + name_bytes = name.encode("utf-8") + rootcaps[name_bytes] = rootcap.encode("utf-8") + pubkeys[name_bytes] = [key] + return rootcaps, pubkeys diff --git a/src/allmydata/history.py b/src/allmydata/history.py index b5cfb7318..06a22ab5d 100644 --- a/src/allmydata/history.py +++ b/src/allmydata/history.py @@ -20,7 +20,7 @@ class History(object): MAX_UPLOAD_STATUSES = 10 MAX_MAPUPDATE_STATUSES = 20 MAX_PUBLISH_STATUSES = 20 - MAX_RETRIEVE_STATUSES = 20 + MAX_RETRIEVE_STATUSES = 40 def __init__(self, stats_provider=None): self.stats_provider = stats_provider diff --git a/src/allmydata/immutable/downloader/share.py b/src/allmydata/immutable/downloader/share.py index 41e11426f..016f1c34d 100644 --- a/src/allmydata/immutable/downloader/share.py +++ b/src/allmydata/immutable/downloader/share.py @@ -475,7 +475,7 @@ class Share(object): # there was corruption somewhere in the given range reason = "corruption in share[%d-%d): %s" % (start, start+offset, str(f.value)) - self._rref.callRemote( + return self._rref.callRemote( "advise_corrupt_share", reason.encode("utf-8") ).addErrback(log.err, "Error from remote call to advise_corrupt_share") diff --git a/src/allmydata/immutable/encode.py b/src/allmydata/immutable/encode.py index 42fc18077..874492785 100644 --- a/src/allmydata/immutable/encode.py +++ b/src/allmydata/immutable/encode.py @@ -694,3 +694,24 @@ class Encoder(object): return self.uri_extension_data def get_uri_extension_hash(self): return self.uri_extension_hash + + def get_uri_extension_size(self): + """ + Calculate the size of the URI extension that gets written at the end of + immutables. + + This may be done earlier than actual encoding, so e.g. we might not + know the crypttext hashes, but that's fine for our purposes since we + only care about the length. + """ + params = self.uri_extension_data.copy() + params["crypttext_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE + params["crypttext_root_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE + params["share_root_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE + assert params.keys() == { + "codec_name", "codec_params", "size", "segment_size", "num_segments", + "needed_shares", "total_shares", "tail_codec_params", + "crypttext_hash", "crypttext_root_hash", "share_root_hash" + }, params.keys() + uri_extension = uri.pack_extension(params) + return len(uri_extension) diff --git a/src/allmydata/immutable/layout.py b/src/allmydata/immutable/layout.py index 6c7362b8a..d552d43c4 100644 --- a/src/allmydata/immutable/layout.py +++ b/src/allmydata/immutable/layout.py @@ -19,6 +19,7 @@ from allmydata.util import mathutil, observer, pipeline, log from allmydata.util.assertutil import precondition from allmydata.storage.server import si_b2a + class LayoutInvalid(Exception): """ There is something wrong with these bytes so they can't be interpreted as the kind of immutable file that I know how to download.""" @@ -90,7 +91,7 @@ FORCE_V2 = False # set briefly by unit tests to make small-sized V2 shares def make_write_bucket_proxy(rref, server, data_size, block_size, num_segments, - num_share_hashes, uri_extension_size_max): + num_share_hashes, uri_extension_size): # Use layout v1 for small files, so they'll be readable by older versions # (= 2**32 or data_size >= 2**32: @@ -195,6 +196,14 @@ class WriteBucketProxy(object): return self._write(offset, data) def put_crypttext_hashes(self, hashes): + # plaintext_hash_tree precedes crypttext_hash_tree. 
It is not used, and + # so is not explicitly written, but we need to write everything, so + # fill it in with nulls. + d = self._write(self._offsets['plaintext_hash_tree'], b"\x00" * self._segment_hash_size) + d.addCallback(lambda _: self._really_put_crypttext_hashes(hashes)) + return d + + def _really_put_crypttext_hashes(self, hashes): offset = self._offsets['crypttext_hash_tree'] assert isinstance(hashes, list) data = b"".join(hashes) @@ -233,8 +242,7 @@ class WriteBucketProxy(object): def put_uri_extension(self, data): offset = self._offsets['uri_extension'] assert isinstance(data, bytes) - precondition(len(data) <= self._uri_extension_size_max, - len(data), self._uri_extension_size_max) + precondition(len(data) == self._uri_extension_size) length = struct.pack(self.fieldstruct, len(data)) return self._write(offset, length+data) @@ -244,17 +252,18 @@ class WriteBucketProxy(object): # would reduce the foolscap CPU overhead per share, but wouldn't # reduce the number of round trips, so it might not be worth the # effort. - + self._written_bytes += len(data) return self._pipeline.add(len(data), self._rref.callRemote, "write", offset, data) def close(self): + assert self._written_bytes == self.get_allocated_size(), f"{self._written_bytes} != {self.get_allocated_size()}" d = self._pipeline.add(0, self._rref.callRemote, "close") d.addCallback(lambda ign: self._pipeline.flush()) return d def abort(self): - self._rref.callRemote("abort").addErrback(log.err, "Error from remote call to abort an immutable write bucket") + return self._rref.callRemote("abort").addErrback(log.err, "Error from remote call to abort an immutable write bucket") def get_servername(self): return self._server.get_name() @@ -303,8 +312,6 @@ class WriteBucketProxy_v2(WriteBucketProxy): @implementer(IStorageBucketReader) class ReadBucketProxy(object): - MAX_UEB_SIZE = 2000 # actual size is closer to 419, but varies by a few bytes - def __init__(self, rref, server, storage_index): self._rref = rref self._server = server @@ -332,11 +339,6 @@ class ReadBucketProxy(object): # TODO: for small shares, read the whole bucket in _start() d = self._fetch_header() d.addCallback(self._parse_offsets) - # XXX The following two callbacks implement a slightly faster/nicer - # way to get the ueb and sharehashtree, but it requires that the - # storage server be >= v1.3.0. 
- # d.addCallback(self._fetch_sharehashtree_and_ueb) - # d.addCallback(self._parse_sharehashtree_and_ueb) def _fail_waiters(f): self._ready.fire(f) def _notify_waiters(result): @@ -381,29 +383,6 @@ class ReadBucketProxy(object): self._offsets[field] = offset return self._offsets - def _fetch_sharehashtree_and_ueb(self, offsets): - sharehashtree_size = offsets['uri_extension'] - offsets['share_hashes'] - return self._read(offsets['share_hashes'], - self.MAX_UEB_SIZE+sharehashtree_size) - - def _parse_sharehashtree_and_ueb(self, data): - sharehashtree_size = self._offsets['uri_extension'] - self._offsets['share_hashes'] - if len(data) < sharehashtree_size: - raise LayoutInvalid("share hash tree truncated -- should have at least %d bytes -- not %d" % (sharehashtree_size, len(data))) - if sharehashtree_size % (2+HASH_SIZE) != 0: - raise LayoutInvalid("share hash tree malformed -- should have an even multiple of %d bytes -- not %d" % (2+HASH_SIZE, sharehashtree_size)) - self._share_hashes = [] - for i in range(0, sharehashtree_size, 2+HASH_SIZE): - hashnum = struct.unpack(">H", data[i:i+2])[0] - hashvalue = data[i+2:i+2+HASH_SIZE] - self._share_hashes.append( (hashnum, hashvalue) ) - - i = self._offsets['uri_extension']-self._offsets['share_hashes'] - if len(data) < i+self._fieldsize: - raise LayoutInvalid("not enough bytes to encode URI length -- should be at least %d bytes long, not %d " % (i+self._fieldsize, len(data),)) - length = struct.unpack(self._fieldstruct, data[i:i+self._fieldsize])[0] - self._ueb_data = data[i+self._fieldsize:i+self._fieldsize+length] - def _get_block_data(self, unused, blocknum, blocksize, thisblocksize): offset = self._offsets['data'] + blocknum * blocksize return self._read(offset, thisblocksize) @@ -446,20 +425,18 @@ class ReadBucketProxy(object): else: return defer.succeed([]) - def _get_share_hashes(self, unused=None): - if hasattr(self, '_share_hashes'): - return self._share_hashes - return self._get_share_hashes_the_old_way() - def get_share_hashes(self): d = self._start_if_needed() d.addCallback(self._get_share_hashes) return d - def _get_share_hashes_the_old_way(self): + def _get_share_hashes(self, _ignore): """ Tahoe storage servers < v1.3.0 would return an error if you tried to read past the end of the share, so we need to use the offset and - read just that much.""" + read just that much. + + HTTP-based storage protocol also doesn't like reading past the end. + """ offset = self._offsets['share_hashes'] size = self._offsets['uri_extension'] - offset if size % (2+HASH_SIZE) != 0: @@ -477,32 +454,29 @@ class ReadBucketProxy(object): d.addCallback(_unpack_share_hashes) return d - def _get_uri_extension_the_old_way(self, unused=None): + def _get_uri_extension(self, unused=None): """ Tahoe storage servers < v1.3.0 would return an error if you tried to read past the end of the share, so we need to fetch the UEB size - and then read just that much.""" + and then read just that much. + + HTTP-based storage protocol also doesn't like reading past the end. + """ offset = self._offsets['uri_extension'] d = self._read(offset, self._fieldsize) def _got_length(data): if len(data) != self._fieldsize: raise LayoutInvalid("not enough bytes to encode URI length -- should be %d bytes long, not %d " % (self._fieldsize, len(data),)) length = struct.unpack(self._fieldstruct, data)[0] - if length >= 2**31: - # URI extension blocks are around 419 bytes long, so this - # must be corrupted. 
Anyway, the foolscap interface schema - # for "read" will not allow >= 2**31 bytes length. + if length >= 2000: + # URI extension blocks are around 419 bytes long; in previous + # versions of the code 1000 was used as a default catchall. So + # 2000 or more must be corrupted. raise RidiculouslyLargeURIExtensionBlock(length) return self._read(offset+self._fieldsize, length) d.addCallback(_got_length) return d - def _get_uri_extension(self, unused=None): - if hasattr(self, '_ueb_data'): - return self._ueb_data - else: - return self._get_uri_extension_the_old_way() - def get_uri_extension(self): d = self._start_if_needed() d.addCallback(self._get_uri_extension) diff --git a/src/allmydata/immutable/upload.py b/src/allmydata/immutable/upload.py index cb332dfdf..6b9b48f6a 100644 --- a/src/allmydata/immutable/upload.py +++ b/src/allmydata/immutable/upload.py @@ -242,31 +242,26 @@ class UploadResults(object): def get_verifycapstr(self): return self._verifycapstr -# our current uri_extension is 846 bytes for small files, a few bytes -# more for larger ones (since the filesize is encoded in decimal in a -# few places). Ask for a little bit more just in case we need it. If -# the extension changes size, we can change EXTENSION_SIZE to -# allocate a more accurate amount of space. -EXTENSION_SIZE = 1000 -# TODO: actual extensions are closer to 419 bytes, so we can probably lower -# this. def pretty_print_shnum_to_servers(s): return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.items() ]) + class ServerTracker(object): def __init__(self, server, sharesize, blocksize, num_segments, num_share_hashes, storage_index, - bucket_renewal_secret, bucket_cancel_secret): + bucket_renewal_secret, bucket_cancel_secret, + uri_extension_size): self._server = server self.buckets = {} # k: shareid, v: IRemoteBucketWriter self.sharesize = sharesize + self.uri_extension_size = uri_extension_size wbp = layout.make_write_bucket_proxy(None, None, sharesize, blocksize, num_segments, num_share_hashes, - EXTENSION_SIZE) + uri_extension_size) self.wbp_class = wbp.__class__ # to create more of them self.allocated_size = wbp.get_allocated_size() self.blocksize = blocksize @@ -314,7 +309,7 @@ class ServerTracker(object): self.blocksize, self.num_segments, self.num_share_hashes, - EXTENSION_SIZE) + self.uri_extension_size) b[sharenum] = bp self.buckets.update(b) return (alreadygot, set(b.keys())) @@ -487,7 +482,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin): def get_shareholders(self, storage_broker, secret_holder, storage_index, share_size, block_size, num_segments, total_shares, needed_shares, - min_happiness): + min_happiness, uri_extension_size): """ @return: (upload_trackers, already_serverids), where upload_trackers is a set of ServerTracker instances that have agreed to hold @@ -529,7 +524,8 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin): # figure out how much space to ask for wbp = layout.make_write_bucket_proxy(None, None, share_size, 0, num_segments, - num_share_hashes, EXTENSION_SIZE) + num_share_hashes, + uri_extension_size) allocated_size = wbp.get_allocated_size() # decide upon the renewal/cancel secrets, to include them in the @@ -554,7 +550,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin): def _create_server_tracker(server, renew, cancel): return ServerTracker( server, share_size, block_size, num_segments, num_share_hashes, - storage_index, renew, cancel, + storage_index, renew, cancel, uri_extension_size ) readonly_trackers, write_trackers = 
self._create_trackers( @@ -1326,7 +1322,8 @@ class CHKUploader(object): d = server_selector.get_shareholders(storage_broker, secret_holder, storage_index, share_size, block_size, - num_segments, n, k, desired) + num_segments, n, k, desired, + encoder.get_uri_extension_size()) def _done(res): self._server_selection_elapsed = time.time() - server_selection_started return res diff --git a/src/allmydata/interfaces.py b/src/allmydata/interfaces.py index 5522663ee..f055a01e2 100644 --- a/src/allmydata/interfaces.py +++ b/src/allmydata/interfaces.py @@ -52,6 +52,8 @@ WriteEnablerSecret = Hash # used to protect mutable share modifications LeaseRenewSecret = Hash # used to protect lease renewal requests LeaseCancelSecret = Hash # was used to protect lease cancellation requests +class NoSpace(Exception): + """Storage space was not available for a space-allocating operation.""" class DataTooLargeError(Exception): """The write went past the expected size of the bucket.""" diff --git a/src/allmydata/introducer/server.py b/src/allmydata/introducer/server.py index 1e28f511b..f0638439a 100644 --- a/src/allmydata/introducer/server.py +++ b/src/allmydata/introducer/server.py @@ -39,7 +39,6 @@ from allmydata.introducer.common import unsign_from_foolscap, \ from allmydata.node import read_config from allmydata.node import create_node_dir from allmydata.node import create_connection_handlers -from allmydata.node import create_control_tub from allmydata.node import create_tub_options from allmydata.node import create_main_tub @@ -88,12 +87,10 @@ def create_introducer(basedir=u"."): config, tub_options, default_connection_handlers, foolscap_connection_handlers, i2p_provider, tor_provider, ) - control_tub = create_control_tub() node = _IntroducerNode( config, main_tub, - control_tub, i2p_provider, tor_provider, ) @@ -105,8 +102,8 @@ def create_introducer(basedir=u"."): class _IntroducerNode(node.Node): NODETYPE = "introducer" - def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider): - node.Node.__init__(self, config, main_tub, control_tub, i2p_provider, tor_provider) + def __init__(self, config, main_tub, i2p_provider, tor_provider): + node.Node.__init__(self, config, main_tub, i2p_provider, tor_provider) self.init_introducer() webport = self.get_config("node", "web.port", None) if webport: @@ -136,7 +133,7 @@ class _IntroducerNode(node.Node): os.rename(old_public_fn, private_fn) furl = self.tub.registerReference(introducerservice, furlFile=private_fn) - self.log(" introducer is at %s" % furl, umid="qF2L9A") + self.log(" introducer can be found in {!r}".format(private_fn), umid="qF2L9A") self.introducer_url = furl # for tests def init_web(self, webport): diff --git a/src/allmydata/node.py b/src/allmydata/node.py index 5a6f8c66f..8266fe3fb 100644 --- a/src/allmydata/node.py +++ b/src/allmydata/node.py @@ -55,6 +55,8 @@ from allmydata.util.yamlutil import ( from . import ( __full_version__, ) +from .protocol_switch import create_tub_with_https_support + def _common_valid_config(): return configutil.ValidConfiguration({ @@ -695,7 +697,7 @@ def create_connection_handlers(config, i2p_provider, tor_provider): def create_tub(tub_options, default_connection_handlers, foolscap_connection_handlers, - handler_overrides={}, **kwargs): + handler_overrides={}, force_foolscap=False, **kwargs): """ Create a Tub with the right options and handlers. 
It will be ephemeral unless the caller provides certFile= in kwargs @@ -705,8 +707,17 @@ def create_tub(tub_options, default_connection_handlers, foolscap_connection_han :param dict tub_options: every key-value pair in here will be set in the new Tub via `Tub.setOption` + + :param bool force_foolscap: If True, only allow Foolscap, not just HTTPS + storage protocol. """ - tub = Tub(**kwargs) + # We listen simultaneously for both Foolscap and HTTPS on the same port, + # so we have to create a special Foolscap Tub for that to work: + if force_foolscap: + tub = Tub(**kwargs) + else: + tub = create_tub_with_https_support(**kwargs) + for (name, value) in list(tub_options.items()): tub.setOption(name, value) handlers = default_connection_handlers.copy() @@ -896,14 +907,20 @@ def create_main_tub(config, tub_options, # FIXME? "node.pem" was the CERTFILE option/thing certfile = config.get_private_path("node.pem") - tub = create_tub( tub_options, default_connection_handlers, foolscap_connection_handlers, + # TODO eventually we will want the default to be False, but for now we + # don't want to enable HTTP by default. + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3934 + force_foolscap=config.get_config( + "storage", "force_foolscap", default=True, boolean=True + ), handler_overrides=handler_overrides, certFile=certfile, ) + if portlocation is None: log.msg("Tub is not listening") else: @@ -919,18 +936,6 @@ def create_main_tub(config, tub_options, return tub -def create_control_tub(): - """ - Creates a Foolscap Tub for use by the control port. This is a - localhost-only ephemeral Tub, with no control over the listening - port or location - """ - control_tub = Tub() - portnum = iputil.listenOnUnused(control_tub) - log.msg("Control Tub location set to 127.0.0.1:%s" % (portnum,)) - return control_tub - - class Node(service.MultiService): """ This class implements common functionality of both Client nodes and Introducer nodes. @@ -938,7 +943,7 @@ class Node(service.MultiService): NODETYPE = "unknown NODETYPE" CERTFILE = "node.pem" - def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider): + def __init__(self, config, main_tub, i2p_provider, tor_provider): """ Initialize the node with the given configuration. Its base directory is the current directory by default. @@ -967,10 +972,6 @@ class Node(service.MultiService): else: self.nodeid = self.short_nodeid = None - self.control_tub = control_tub - if self.control_tub is not None: - self.control_tub.setServiceParent(self) - self.log("Node constructed. 
" + __full_version__) iputil.increase_rlimits() diff --git a/src/allmydata/nodemaker.py b/src/allmydata/nodemaker.py index 6b0b77c5c..23ba4b451 100644 --- a/src/allmydata/nodemaker.py +++ b/src/allmydata/nodemaker.py @@ -126,12 +126,12 @@ class NodeMaker(object): return self._create_dirnode(filenode) return None - def create_mutable_file(self, contents=None, keysize=None, version=None): + def create_mutable_file(self, contents=None, version=None): if version is None: version = self.mutable_file_default n = MutableFileNode(self.storage_broker, self.secret_holder, self.default_encoding_parameters, self.history) - d = self.key_generator.generate(keysize) + d = self.key_generator.generate() d.addCallback(n.create_with_keys, contents, version=version) d.addCallback(lambda res: n) return d diff --git a/src/allmydata/protocol_switch.py b/src/allmydata/protocol_switch.py new file mode 100644 index 000000000..b0af84c33 --- /dev/null +++ b/src/allmydata/protocol_switch.py @@ -0,0 +1,210 @@ +""" +Support for listening with both HTTPS and Foolscap on the same port. + +The goal is to make the transition from Foolscap to HTTPS-based protocols as +simple as possible, with no extra configuration needed. Listening on the same +port means a user upgrading Tahoe-LAFS will automatically get HTTPS working +with no additional changes. + +Use ``create_tub_with_https_support()`` creates a new ``Tub`` that has its +``negotiationClass`` modified to be a new subclass tied to that specific +``Tub`` instance. Calling ``tub.negotiationClass.add_storage_server(...)`` +then adds relevant information for a storage server once it becomes available +later in the configuration process. +""" + +from __future__ import annotations + +from itertools import chain + +from twisted.internet.protocol import Protocol +from twisted.internet.interfaces import IDelayedCall +from twisted.internet.ssl import CertificateOptions +from twisted.web.server import Site +from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.internet import reactor + +from hyperlink import DecodedURL +from foolscap.negotiate import Negotiation +from foolscap.api import Tub + +from .storage.http_server import HTTPServer, build_nurl +from .storage.server import StorageServer + + +class _PretendToBeNegotiation(type): + """ + Metaclass that allows ``_FoolscapOrHttps`` to pretend to be a + ``Negotiation`` instance, since Foolscap does some checks like + ``assert isinstance(protocol, tub.negotiationClass)`` in its internals, + and sometimes that ``protocol`` is a ``_FoolscapOrHttps`` instance, but + sometimes it's a ``Negotiation`` instance. + """ + + def __instancecheck__(self, instance): + return issubclass(instance.__class__, self) or isinstance(instance, Negotiation) + + +class _FoolscapOrHttps(Protocol, metaclass=_PretendToBeNegotiation): + """ + Based on initial query, decide whether we're talking Foolscap or HTTP. + + Additionally, pretends to be a ``foolscap.negotiate.Negotiation`` instance, + since these are created by Foolscap's ``Tub``, by setting this to be the + tub's ``negotiationClass``. + + Do not instantiate directly, use ``create_tub_with_https_support(...)`` + instead. The way this class works is that a new subclass is created for a + specific ``Tub`` instance. + """ + + # These are class attributes; they will be set by + # create_tub_with_https_support() and add_storage_server(). 
+ + # The Twisted HTTPS protocol factory wrapping the storage server HTTP API: + https_factory: TLSMemoryBIOFactory + # The tub that created us: + tub: Tub + + @classmethod + def add_storage_server( + cls, storage_server: StorageServer, swissnum: bytes + ) -> set[DecodedURL]: + """ + Update a ``_FoolscapOrHttps`` subclass for a specific ``Tub`` instance + with the class attributes it requires for a specific storage server. + + Returns the resulting NURLs. + """ + # We need to be a subclass: + assert cls != _FoolscapOrHttps + # The tub instance must already be set: + assert hasattr(cls, "tub") + assert isinstance(cls.tub, Tub) + + # Tub.myCertificate is a twisted.internet.ssl.PrivateCertificate + # instance. + certificate_options = CertificateOptions( + privateKey=cls.tub.myCertificate.privateKey.original, + certificate=cls.tub.myCertificate.original, + ) + + http_storage_server = HTTPServer(storage_server, swissnum) + cls.https_factory = TLSMemoryBIOFactory( + certificate_options, + False, + Site(http_storage_server.get_resource()), + ) + + storage_nurls = set() + # Individual hints can be in the form + # "tcp:host:port,tcp:host:port,tcp:host:port". + for location_hint in chain.from_iterable( + hints.split(",") for hints in cls.tub.locationHints + ): + if location_hint.startswith("tcp:"): + _, hostname, port = location_hint.split(":") + port = int(port) + storage_nurls.add( + build_nurl( + hostname, + port, + str(swissnum, "ascii"), + cls.tub.myCertificate.original.to_cryptography(), + ) + ) + # TODO this is probably where we'll have to support Tor and I2P? + # See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3888#comment:9 + # for discussion (there will be separate tickets added for those at + # some point.) + return storage_nurls + + def __init__(self, *args, **kwargs): + self._foolscap: Negotiation = Negotiation(*args, **kwargs) + + def __setattr__(self, name, value): + if name in {"_foolscap", "_buffer", "transport", "__class__", "_timeout"}: + object.__setattr__(self, name, value) + else: + setattr(self._foolscap, name, value) + + def __getattr__(self, name): + return getattr(self._foolscap, name) + + def _convert_to_negotiation(self): + """ + Convert self to a ``Negotiation`` instance. + """ + self.__class__ = Negotiation # type: ignore + self.__dict__ = self._foolscap.__dict__ + + def initClient(self, *args, **kwargs): + # After creation, a Negotiation instance either has initClient() or + # initServer() called. Since this is a client, we're never going to do + # HTTP, so we can immediately become a Negotiation instance. + assert not hasattr(self, "_buffer") + self._convert_to_negotiation() + return self.initClient(*args, **kwargs) + + def connectionMade(self): + self._buffer: bytes = b"" + self._timeout: IDelayedCall = reactor.callLater( + 30, self.transport.abortConnection + ) + + def connectionLost(self, reason): + if self._timeout.active(): + self._timeout.cancel() + + def dataReceived(self, data: bytes) -> None: + """Handle incoming data. + + Once we've decided which protocol we are, update self.__class__, at + which point all methods will be called on the new class. + """ + self._buffer += data + if len(self._buffer) < 8: + return + + # Check if it looks like a Foolscap request. If so, it can handle this + # and later data, otherwise assume HTTPS. 
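+        # Foolscap negotiation opens with an HTTP-style request line of the
+        # form "GET /id/<tubid>", so a buffer starting with b"GET /id/" is
+        # treated as Foolscap; any other prefix is handed to the HTTPS factory.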
+ self._timeout.cancel() + if self._buffer.startswith(b"GET /id/"): + # We're a Foolscap Negotiation server protocol instance: + transport = self.transport + buf = self._buffer + self._convert_to_negotiation() + self.makeConnection(transport) + self.dataReceived(buf) + return + else: + # We're a HTTPS protocol instance, serving the storage protocol: + assert self.transport is not None + protocol = self.https_factory.buildProtocol(self.transport.getPeer()) + protocol.makeConnection(self.transport) + protocol.dataReceived(self._buffer) + + # Update the factory so it knows we're transforming to a new + # protocol object (we'll do that next) + value = self.https_factory.protocols.pop(protocol) + self.https_factory.protocols[self] = value + + # Transform self into the TLS protocol 🪄 + self.__class__ = protocol.__class__ + self.__dict__ = protocol.__dict__ + + +def create_tub_with_https_support(**kwargs) -> Tub: + """ + Create a new Tub that also supports HTTPS. + + This involves creating a new protocol switch class for the specific ``Tub`` + instance. + """ + the_tub = Tub(**kwargs) + + class FoolscapOrHttpForTub(_FoolscapOrHttps): + tub = the_tub + + the_tub.negotiationClass = FoolscapOrHttpForTub # type: ignore + return the_tub diff --git a/src/allmydata/scripts/admin.py b/src/allmydata/scripts/admin.py index a9feed0dd..e0dcc8821 100644 --- a/src/allmydata/scripts/admin.py +++ b/src/allmydata/scripts/admin.py @@ -18,7 +18,17 @@ except ImportError: pass from twisted.python import usage -from allmydata.scripts.common import BaseOptions +from twisted.python.filepath import ( + FilePath, +) +from allmydata.scripts.common import ( + BaseOptions, + BasedirOptions, +) +from allmydata.storage import ( + crawler, + expirer, +) class GenerateKeypairOptions(BaseOptions): @@ -65,12 +75,55 @@ def derive_pubkey(options): print("public:", str(ed25519.string_from_verifying_key(public_key), "ascii"), file=out) return 0 +class MigrateCrawlerOptions(BasedirOptions): + + def getSynopsis(self): + return "Usage: tahoe [global-options] admin migrate-crawler" + + def getUsage(self, width=None): + t = BasedirOptions.getUsage(self, width) + t += ( + "The crawler data is now stored as JSON to avoid" + " potential security issues with pickle files.\n\nIf" + " you are confident the state files in the 'storage/'" + " subdirectory of your node are trustworthy, run this" + " command to upgrade them to JSON.\n\nThe files are:" + " lease_checker.history, lease_checker.state, and" + " bucket_counter.state" + ) + return t + + +def migrate_crawler(options): + out = options.stdout + storage = FilePath(options['basedir']).child("storage") + + conversions = [ + (storage.child("lease_checker.state"), crawler._convert_pickle_state_to_json), + (storage.child("bucket_counter.state"), crawler._convert_pickle_state_to_json), + (storage.child("lease_checker.history"), expirer._convert_pickle_state_to_json), + ] + + for fp, converter in conversions: + existed = fp.exists() + newfp = crawler._upgrade_pickle_to_json(fp, converter) + if existed: + print("Converted '{}' to '{}'".format(fp.path, newfp.path), file=out) + else: + if newfp.exists(): + print("Already converted: '{}'".format(newfp.path), file=out) + else: + print("Not found: '{}'".format(fp.path), file=out) + + class AdminCommand(BaseOptions): subCommands = [ ("generate-keypair", None, GenerateKeypairOptions, "Generate a public/private keypair, write to stdout."), ("derive-pubkey", None, DerivePubkeyOptions, "Derive a public key from a private key."), + ("migrate-crawler", None, 
MigrateCrawlerOptions, + "Write the crawler-history data as JSON."), ] def postOptions(self): if not hasattr(self, 'subOptions'): @@ -88,6 +141,7 @@ each subcommand. subDispatch = { "generate-keypair": print_keypair, "derive-pubkey": derive_pubkey, + "migrate-crawler": migrate_crawler, } def do_admin(options): diff --git a/src/allmydata/scripts/common.py b/src/allmydata/scripts/common.py index 0a9ab8714..c9fc8e031 100644 --- a/src/allmydata/scripts/common.py +++ b/src/allmydata/scripts/common.py @@ -141,7 +141,9 @@ def write_introducer(basedir, petname, furl): """ if isinstance(furl, bytes): furl = furl.decode("utf-8") - basedir.child(b"private").child(b"introducers.yaml").setContent( + private = basedir.child(b"private") + private.makedirs(ignoreExistingDirectory=True) + private.child(b"introducers.yaml").setContent( safe_dump({ "introducers": { petname: { diff --git a/src/allmydata/scripts/create_node.py b/src/allmydata/scripts/create_node.py index 4959ed391..5d9da518b 100644 --- a/src/allmydata/scripts/create_node.py +++ b/src/allmydata/scripts/create_node.py @@ -37,9 +37,6 @@ from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, argv_to_unicode, quote_local_unicode_path, get_io_encoding from allmydata.util import fileutil, i2p_provider, iputil, tor_provider, jsonbytes as json -from wormhole import wormhole - - dummy_tac = """ import sys print("Nodes created by Tahoe-LAFS v1.11.0 or later cannot be run by") @@ -377,7 +374,7 @@ def _get_config_via_wormhole(config): relay_url = config.parent['wormhole-server'] print("Connecting to '{}'".format(relay_url), file=out) - wh = wormhole.create( + wh = config.parent.wormhole.create( appid=config.parent['wormhole-invite-appid'], relay_url=relay_url, reactor=reactor, diff --git a/src/allmydata/scripts/debug.py b/src/allmydata/scripts/debug.py index 2d6ba4602..6201ce28f 100644 --- a/src/allmydata/scripts/debug.py +++ b/src/allmydata/scripts/debug.py @@ -15,15 +15,22 @@ try: except ImportError: pass - -# do not import any allmydata modules at this level. Do that from inside -# individual functions instead. 
import struct, time, os, sys + from twisted.python import usage, failure from twisted.internet import defer from foolscap.logging import cli as foolscap_cli -from allmydata.scripts.common import BaseOptions +from allmydata.scripts.common import BaseOptions +from allmydata import uri +from allmydata.storage.mutable import MutableShareFile +from allmydata.storage.immutable import ShareFile +from allmydata.mutable.layout import unpack_share +from allmydata.mutable.layout import MDMFSlotReadProxy +from allmydata.mutable.common import NeedMoreDataError +from allmydata.immutable.layout import ReadBucketProxy +from allmydata.util import base32 +from allmydata.util.encodingutil import quote_output class DumpOptions(BaseOptions): def getSynopsis(self): @@ -56,13 +63,11 @@ def dump_share(options): # check the version, to see if we have a mutable or immutable share print("share filename: %s" % quote_output(options['filename']), file=out) - f = open(options['filename'], "rb") - prefix = f.read(32) - f.close() - if prefix == MutableShareFile.MAGIC: - return dump_mutable_share(options) - # otherwise assume it's immutable - return dump_immutable_share(options) + with open(options['filename'], "rb") as f: + if MutableShareFile.is_valid_header(f.read(32)): + return dump_mutable_share(options) + # otherwise assume it's immutable + return dump_immutable_share(options) def dump_immutable_share(options): from allmydata.storage.immutable import ShareFile @@ -170,7 +175,7 @@ def dump_immutable_lease_info(f, out): leases = list(f.get_leases()) if leases: for i,lease in enumerate(leases): - when = format_expiration_time(lease.expiration_time) + when = format_expiration_time(lease.get_expiration_time()) print(" Lease #%d: owner=%d, expire in %s" \ % (i, lease.owner_num, when), file=out) else: @@ -223,10 +228,10 @@ def dump_mutable_share(options): print(file=out) print(" Lease #%d:" % leasenum, file=out) print(" ownerid: %d" % lease.owner_num, file=out) - when = format_expiration_time(lease.expiration_time) + when = format_expiration_time(lease.get_expiration_time()) print(" expires in %s" % when, file=out) - print(" renew_secret: %s" % str(base32.b2a(lease.renew_secret), "utf-8"), file=out) - print(" cancel_secret: %s" % str(base32.b2a(lease.cancel_secret), "utf-8"), file=out) + print(" renew_secret: %s" % lease.present_renew_secret(), file=out) + print(" cancel_secret: %s" % lease.present_cancel_secret(), file=out) print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out) else: print("No leases.", file=out) @@ -712,125 +717,122 @@ def call(c, *args, **kwargs): return results[0] def describe_share(abs_sharefile, si_s, shnum_s, now, out): - from allmydata import uri - from allmydata.storage.mutable import MutableShareFile - from allmydata.storage.immutable import ShareFile - from allmydata.mutable.layout import unpack_share - from allmydata.mutable.common import NeedMoreDataError - from allmydata.immutable.layout import ReadBucketProxy - from allmydata.util import base32 - from allmydata.util.encodingutil import quote_output - import struct - - f = open(abs_sharefile, "rb") - prefix = f.read(32) - - if prefix == MutableShareFile.MAGIC: - # mutable share - m = MutableShareFile(abs_sharefile) - WE, nodeid = m._read_write_enabler_and_nodeid(f) - data_length = m._read_data_length(f) - expiration_time = min( [lease.expiration_time - for (i,lease) in m._enumerate_leases(f)] ) - expiration = max(0, expiration_time - now) - - share_type = "unknown" - f.seek(m.DATA_OFFSET) - version = f.read(1) - if 
version == b"\x00": - # this slot contains an SMDF share - share_type = "SDMF" - elif version == b"\x01": - share_type = "MDMF" - - if share_type == "SDMF": - f.seek(m.DATA_OFFSET) - data = f.read(min(data_length, 2000)) - - try: - pieces = unpack_share(data) - except NeedMoreDataError as e: - # retry once with the larger size - size = e.needed_bytes - f.seek(m.DATA_OFFSET) - data = f.read(min(data_length, size)) - pieces = unpack_share(data) - (seqnum, root_hash, IV, k, N, segsize, datalen, - pubkey, signature, share_hash_chain, block_hash_tree, - share_data, enc_privkey) = pieces - - print("SDMF %s %d/%d %d #%d:%s %d %s" % \ - (si_s, k, N, datalen, - seqnum, str(base32.b2a(root_hash), "utf-8"), - expiration, quote_output(abs_sharefile)), file=out) - elif share_type == "MDMF": - from allmydata.mutable.layout import MDMFSlotReadProxy - fake_shnum = 0 - # TODO: factor this out with dump_MDMF_share() - class ShareDumper(MDMFSlotReadProxy): - def _read(self, readvs, force_remote=False, queue=False): - data = [] - for (where,length) in readvs: - f.seek(m.DATA_OFFSET+where) - data.append(f.read(length)) - return defer.succeed({fake_shnum: data}) - - p = ShareDumper(None, "fake-si", fake_shnum) - def extract(func): - stash = [] - # these methods return Deferreds, but we happen to know that - # they run synchronously when not actually talking to a - # remote server - d = func() - d.addCallback(stash.append) - return stash[0] - - verinfo = extract(p.get_verinfo) - (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix, - offsets) = verinfo - print("MDMF %s %d/%d %d #%d:%s %d %s" % \ - (si_s, k, N, datalen, - seqnum, str(base32.b2a(root_hash), "utf-8"), - expiration, quote_output(abs_sharefile)), file=out) + with open(abs_sharefile, "rb") as f: + prefix = f.read(32) + if MutableShareFile.is_valid_header(prefix): + _describe_mutable_share(abs_sharefile, f, now, si_s, out) + elif ShareFile.is_valid_header(prefix): + _describe_immutable_share(abs_sharefile, now, si_s, out) else: - print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out) + print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out) - elif struct.unpack(">L", prefix[:4]) == (1,): - # immutable +def _describe_mutable_share(abs_sharefile, f, now, si_s, out): + # mutable share + m = MutableShareFile(abs_sharefile) + WE, nodeid = m._read_write_enabler_and_nodeid(f) + data_length = m._read_data_length(f) + expiration_time = min( [lease.get_expiration_time() + for (i,lease) in m._enumerate_leases(f)] ) + expiration = max(0, expiration_time - now) - class ImmediateReadBucketProxy(ReadBucketProxy): - def __init__(self, sf): - self.sf = sf - ReadBucketProxy.__init__(self, None, None, "") - def __repr__(self): - return "" - def _read(self, offset, size): - return defer.succeed(sf.read_share_data(offset, size)) + share_type = "unknown" + f.seek(m.DATA_OFFSET) + version = f.read(1) + if version == b"\x00": + # this slot contains an SMDF share + share_type = "SDMF" + elif version == b"\x01": + share_type = "MDMF" - # use a ReadBucketProxy to parse the bucket and find the uri extension - sf = ShareFile(abs_sharefile) - bp = ImmediateReadBucketProxy(sf) + if share_type == "SDMF": + f.seek(m.DATA_OFFSET) - expiration_time = min( [lease.expiration_time - for lease in sf.get_leases()] ) - expiration = max(0, expiration_time - now) + # Read at least the mutable header length, if possible. 
If there's + # less data than that in the share, don't try to read more (we won't + # be able to unpack the header in this case but we surely don't want + # to try to unpack bytes *following* the data section as if they were + # header data). Rather than 2000 we could use HEADER_LENGTH from + # allmydata/mutable/layout.py, probably. + data = f.read(min(data_length, 2000)) - UEB_data = call(bp.get_uri_extension) - unpacked = uri.unpack_extension_readable(UEB_data) + try: + pieces = unpack_share(data) + except NeedMoreDataError as e: + # retry once with the larger size + size = e.needed_bytes + f.seek(m.DATA_OFFSET) + data = f.read(min(data_length, size)) + pieces = unpack_share(data) + (seqnum, root_hash, IV, k, N, segsize, datalen, + pubkey, signature, share_hash_chain, block_hash_tree, + share_data, enc_privkey) = pieces - k = unpacked["needed_shares"] - N = unpacked["total_shares"] - filesize = unpacked["size"] - ueb_hash = unpacked["UEB_hash"] + print("SDMF %s %d/%d %d #%d:%s %d %s" % \ + (si_s, k, N, datalen, + seqnum, str(base32.b2a(root_hash), "utf-8"), + expiration, quote_output(abs_sharefile)), file=out) + elif share_type == "MDMF": + fake_shnum = 0 + # TODO: factor this out with dump_MDMF_share() + class ShareDumper(MDMFSlotReadProxy): + def _read(self, readvs, force_remote=False, queue=False): + data = [] + for (where,length) in readvs: + f.seek(m.DATA_OFFSET+where) + data.append(f.read(length)) + return defer.succeed({fake_shnum: data}) - print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize, - str(ueb_hash, "utf-8"), expiration, - quote_output(abs_sharefile)), file=out) + p = ShareDumper(None, "fake-si", fake_shnum) + def extract(func): + stash = [] + # these methods return Deferreds, but we happen to know that + # they run synchronously when not actually talking to a + # remote server + d = func() + d.addCallback(stash.append) + return stash[0] + verinfo = extract(p.get_verinfo) + (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix, + offsets) = verinfo + print("MDMF %s %d/%d %d #%d:%s %d %s" % \ + (si_s, k, N, datalen, + seqnum, str(base32.b2a(root_hash), "utf-8"), + expiration, quote_output(abs_sharefile)), file=out) else: - print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out) + print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out) + + +def _describe_immutable_share(abs_sharefile, now, si_s, out): + class ImmediateReadBucketProxy(ReadBucketProxy): + def __init__(self, sf): + self.sf = sf + ReadBucketProxy.__init__(self, None, None, "") + def __repr__(self): + return "" + def _read(self, offset, size): + return defer.succeed(sf.read_share_data(offset, size)) + + # use a ReadBucketProxy to parse the bucket and find the uri extension + sf = ShareFile(abs_sharefile) + bp = ImmediateReadBucketProxy(sf) + + expiration_time = min(lease.get_expiration_time() + for lease in sf.get_leases()) + expiration = max(0, expiration_time - now) + + UEB_data = call(bp.get_uri_extension) + unpacked = uri.unpack_extension_readable(UEB_data) + + k = unpacked["needed_shares"] + N = unpacked["total_shares"] + filesize = unpacked["size"] + ueb_hash = unpacked["UEB_hash"] + + print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize, + str(ueb_hash, "utf-8"), expiration, + quote_output(abs_sharefile)), file=out) - f.close() def catalog_shares(options): from allmydata.util.encodingutil import listdir_unicode, quote_output @@ -933,34 +935,35 @@ def corrupt_share(options): f.write(d) f.close() - f = open(fn, "rb") - prefix = f.read(32) - f.close() - if 
prefix == MutableShareFile.MAGIC: - # mutable - m = MutableShareFile(fn) - f = open(fn, "rb") - f.seek(m.DATA_OFFSET) - data = f.read(2000) - # make sure this slot contains an SMDF share - assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported" - f.close() + with open(fn, "rb") as f: + prefix = f.read(32) - (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, - ig_datalen, offsets) = unpack_header(data) + if MutableShareFile.is_valid_header(prefix): + # mutable + m = MutableShareFile(fn) + with open(fn, "rb") as f: + f.seek(m.DATA_OFFSET) + # Read enough data to get a mutable header to unpack. + data = f.read(2000) + # make sure this slot contains an SMDF share + assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported" + f.close() - assert version == 0, "we only handle v0 SDMF files" - start = m.DATA_OFFSET + offsets["share_data"] - end = m.DATA_OFFSET + offsets["enc_privkey"] - flip_bit(start, end) - else: - # otherwise assume it's immutable - f = ShareFile(fn) - bp = ReadBucketProxy(None, None, '') - offsets = bp._parse_offsets(f.read_share_data(0, 0x24)) - start = f._data_offset + offsets["data"] - end = f._data_offset + offsets["plaintext_hash_tree"] - flip_bit(start, end) + (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, + ig_datalen, offsets) = unpack_header(data) + + assert version == 0, "we only handle v0 SDMF files" + start = m.DATA_OFFSET + offsets["share_data"] + end = m.DATA_OFFSET + offsets["enc_privkey"] + flip_bit(start, end) + else: + # otherwise assume it's immutable + f = ShareFile(fn) + bp = ReadBucketProxy(None, None, '') + offsets = bp._parse_offsets(f.read_share_data(0, 0x24)) + start = f._data_offset + offsets["data"] + end = f._data_offset + offsets["plaintext_hash_tree"] + flip_bit(start, end) diff --git a/src/allmydata/scripts/runner.py b/src/allmydata/scripts/runner.py index 145ee6464..756c26f2c 100644 --- a/src/allmydata/scripts/runner.py +++ b/src/allmydata/scripts/runner.py @@ -47,22 +47,23 @@ if _default_nodedir: NODEDIR_HELP += " [default for most commands: " + quote_local_unicode_path(_default_nodedir) + "]" -# XXX all this 'dispatch' stuff needs to be unified + fixed up -_control_node_dispatch = { - "run": tahoe_run.run, -} - process_control_commands = [ ("run", None, tahoe_run.RunOptions, "run a node without daemonizing"), ] # type: SubCommands class Options(usage.Options): + """ + :ivar wormhole: An object exposing the magic-wormhole API (mainly a test + hook). 
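+
+    Tests can replace this attribute with a stub implementation so that the
+    wormhole-based commands can be exercised without a real relay server.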
+ """ # unit tests can override these to point at StringIO instances stdin = sys.stdin stdout = sys.stdout stderr = sys.stderr + from wormhole import wormhole + subCommands = ( create_node.subCommands + admin.subCommands + process_control_commands @@ -189,6 +190,7 @@ def parse_or_exit(config, argv, stdout, stderr): return config def dispatch(config, + reactor, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): command = config.subCommand so = config.subOptions @@ -200,8 +202,8 @@ def dispatch(config, if command in create_dispatch: f = create_dispatch[command] - elif command in _control_node_dispatch: - f = _control_node_dispatch[command] + elif command == "run": + f = lambda config: tahoe_run.run(reactor, config) elif command in debug.dispatch: f = debug.dispatch[command] elif command in admin.dispatch: @@ -355,7 +357,7 @@ def _run_with_reactor(reactor, config, argv, stdout, stderr): stderr, ) d.addCallback(_maybe_enable_eliot_logging, reactor) - d.addCallback(dispatch, stdout=stdout, stderr=stderr) + d.addCallback(dispatch, reactor, stdout=stdout, stderr=stderr) def _show_exception(f): # when task.react() notices a non-SystemExit exception, it does # log.err() with the failure and then exits with rc=1. We want this diff --git a/src/allmydata/scripts/tahoe_invite.py b/src/allmydata/scripts/tahoe_invite.py index 09d4cbd59..b62d6a463 100644 --- a/src/allmydata/scripts/tahoe_invite.py +++ b/src/allmydata/scripts/tahoe_invite.py @@ -18,8 +18,6 @@ except ImportError: from twisted.python import usage from twisted.internet import defer, reactor -from wormhole import wormhole - from allmydata.util.encodingutil import argv_to_abspath from allmydata.util import jsonbytes as json from allmydata.scripts.common import get_default_nodedir, get_introducer_furl @@ -50,7 +48,7 @@ def _send_config_via_wormhole(options, config): err = options.stderr relay_url = options.parent['wormhole-server'] print("Connecting to '{}'...".format(relay_url), file=out) - wh = wormhole.create( + wh = options.parent.wormhole.create( appid=options.parent['wormhole-invite-appid'], relay_url=relay_url, reactor=reactor, diff --git a/src/allmydata/scripts/tahoe_run.py b/src/allmydata/scripts/tahoe_run.py index 01f1a354c..65d520f57 100644 --- a/src/allmydata/scripts/tahoe_run.py +++ b/src/allmydata/scripts/tahoe_run.py @@ -19,6 +19,7 @@ import os, sys from allmydata.scripts.common import BasedirOptions from twisted.scripts import twistd from twisted.python import usage +from twisted.python.filepath import FilePath from twisted.python.reflect import namedAny from twisted.internet.defer import maybeDeferred from twisted.application.service import Service @@ -27,41 +28,49 @@ from allmydata.scripts.default_nodedir import _default_nodedir from allmydata.util.encodingutil import listdir_unicode, quote_local_unicode_path from allmydata.util.configutil import UnknownConfigError from allmydata.util.deferredutil import HookMixin - +from allmydata.util.pid import ( + parse_pidfile, + check_pid_process, + cleanup_pidfile, + ProcessInTheWay, + InvalidPidFile, +) +from allmydata.storage.crawler import ( + MigratePickleFileError, +) from allmydata.node import ( PortAssignmentRequired, PrivacyError, ) + def get_pidfile(basedir): """ Returns the path to the PID file. 
:param basedir: the node's base directory :returns: the path to the PID file """ - return os.path.join(basedir, u"twistd.pid") + return os.path.join(basedir, u"running.process") + def get_pid_from_pidfile(pidfile): """ Tries to read and return the PID stored in the node's PID file - (twistd.pid). + :param pidfile: try to read this PID file :returns: A numeric PID on success, ``None`` if PID file absent or inaccessible, ``-1`` if PID file invalid. """ try: - with open(pidfile, "r") as f: - pid = f.read() + pid, _ = parse_pidfile(pidfile) except EnvironmentError: return None - - try: - pid = int(pid) - except ValueError: + except InvalidPidFile: return -1 return pid + def identify_node_type(basedir): """ :return unicode: None or one of: 'client' or 'introducer'. @@ -164,8 +173,20 @@ class DaemonizeTheRealService(Service, HookMixin): self.stderr.write("\ntub.port cannot be 0: you must choose.\n\n") elif reason.check(PrivacyError): self.stderr.write("\n{}\n\n".format(reason.value)) + elif reason.check(MigratePickleFileError): + self.stderr.write( + "Error\nAt least one 'pickle' format file exists.\n" + "The file is {}\n" + "You must either delete the pickle-format files" + " or migrate them using the command:\n" + " tahoe admin migrate-crawler --basedir {}\n\n" + .format( + reason.value.args[0].path, + self.basedir, + ) + ) else: - self.stderr.write("\nUnknown error\n") + self.stderr.write("\nUnknown error, here's the traceback:\n") reason.printTraceback(self.stderr) reactor.stop() @@ -192,7 +213,7 @@ class DaemonizeTahoeNodePlugin(object): return DaemonizeTheRealService(self.nodetype, self.basedir, so) -def run(config, runApp=twistd.runApp): +def run(reactor, config, runApp=twistd.runApp): """ Runs a Tahoe-LAFS node in the foreground. @@ -213,10 +234,15 @@ def run(config, runApp=twistd.runApp): print("%s is not a recognizable node directory" % quoted_basedir, file=err) return 1 - twistd_args = ["--nodaemon", "--rundir", basedir] + twistd_args = [ + # ensure twistd machinery does not daemonize. + "--nodaemon", + "--rundir", basedir, + ] if sys.platform != "win32": - pidfile = get_pidfile(basedir) - twistd_args.extend(["--pidfile", pidfile]) + # turn off Twisted's pid-file to use our own -- but not on + # windows, because twistd doesn't know about pidfiles there + twistd_args.extend(["--pidfile", None]) twistd_args.extend(config.twistd_args) twistd_args.append("DaemonizeTahoeNode") # point at our DaemonizeTahoeNodePlugin @@ -232,10 +258,18 @@ def run(config, runApp=twistd.runApp): return 1 twistd_config.loadedPlugins = {"DaemonizeTahoeNode": DaemonizeTahoeNodePlugin(nodetype, basedir)} - # handle invalid PID file (twistd might not start otherwise) - if sys.platform != "win32" and get_pid_from_pidfile(pidfile) == -1: - print("found invalid PID file in %s - deleting it" % basedir, file=err) - os.remove(pidfile) + # our own pid-style file contains PID and process creation time + pidfile = FilePath(get_pidfile(config['basedir'])) + try: + check_pid_process(pidfile) + except (ProcessInTheWay, InvalidPidFile) as e: + print("ERROR: {}".format(e), file=err) + return 1 + else: + reactor.addSystemEventTrigger( + "after", "shutdown", + lambda: cleanup_pidfile(pidfile) + ) # We always pass --nodaemon so twistd.runApp does not daemonize. print("running node in %s" % (quoted_basedir,), file=out) diff --git a/src/allmydata/stats.py b/src/allmydata/stats.py index 13ed8817c..f6361b074 100644 --- a/src/allmydata/stats.py +++ b/src/allmydata/stats.py @@ -1,54 +1,39 @@ """ Ported to Python 3. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals -from future.utils import PY2 -if PY2: - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 - from time import clock as process_time -else: - from time import process_time +from collections import deque +from time import process_time import time +from typing import Deque, Tuple from twisted.application import service from twisted.application.internet import TimerService from zope.interface import implementer -from foolscap.api import eventually from allmydata.util import log, dictutil from allmydata.interfaces import IStatsProducer @implementer(IStatsProducer) class CPUUsageMonitor(service.MultiService): - HISTORY_LENGTH = 15 - POLL_INTERVAL = 60 # type: float + HISTORY_LENGTH: int = 15 + POLL_INTERVAL: float = 60 + initial_cpu: float = 0.0 def __init__(self): service.MultiService.__init__(self) - # we don't use process_time() here, because the constructor is run by - # the twistd parent process (as it loads the .tac file), whereas the - # rest of the program will be run by the child process, after twistd - # forks. Instead, set self.initial_cpu as soon as the reactor starts - # up. - self.initial_cpu = 0.0 # just in case - eventually(self._set_initial_cpu) - self.samples = [] + self.samples: Deque[Tuple[float, float]] = deque([], self.HISTORY_LENGTH + 1) # we provide 1min, 5min, and 15min moving averages TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self) - def _set_initial_cpu(self): + def startService(self): self.initial_cpu = process_time() + return super().startService() def check(self): now_wall = time.time() now_cpu = process_time() self.samples.append( (now_wall, now_cpu) ) - while len(self.samples) > self.HISTORY_LENGTH+1: - self.samples.pop(0) def _average_N_minutes(self, size): if len(self.samples) < size+1: diff --git a/src/allmydata/storage/common.py b/src/allmydata/storage/common.py index e5563647f..17a3f41b7 100644 --- a/src/allmydata/storage/common.py +++ b/src/allmydata/storage/common.py @@ -16,11 +16,22 @@ from allmydata.util import base32 # Backwards compatibility. from allmydata.interfaces import DataTooLargeError # noqa: F401 -class UnknownMutableContainerVersionError(Exception): - pass -class UnknownImmutableContainerVersionError(Exception): +class UnknownContainerVersionError(Exception): + def __init__(self, filename, version): + self.filename = filename + self.version = version + + def __str__(self): + return "sharefile {!r} had unexpected version {!r}".format( + self.filename, + self.version, + ) + +class UnknownMutableContainerVersionError(UnknownContainerVersionError): pass +class UnknownImmutableContainerVersionError(UnknownContainerVersionError): + pass def si_b2a(storageindex): return base32.b2a(storageindex) diff --git a/src/allmydata/storage/crawler.py b/src/allmydata/storage/crawler.py index bd4f4f432..7516bc4e9 100644 --- a/src/allmydata/storage/crawler.py +++ b/src/allmydata/storage/crawler.py @@ -11,23 +11,185 @@ from __future__ import print_function from future.utils import PY2, PY3 if PY2: - # We don't import bytes, object, dict, and list just in case they're used, - # so as not to create brittle pickles with random magic objects. 
- from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, range, str, max, min # noqa: F401 + from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -import os, time, struct -try: - import cPickle as pickle -except ImportError: - import pickle # type: ignore +import os +import time +import json +import struct from twisted.internet import reactor from twisted.application import service +from twisted.python.filepath import FilePath from allmydata.storage.common import si_b2a from allmydata.util import fileutil class TimeSliceExceeded(Exception): pass + +class MigratePickleFileError(Exception): + """ + A pickle-format file exists (the FilePath to the file will be the + single arg). + """ + pass + + +def _convert_cycle_data(state): + """ + :param dict state: cycle-to-date or history-item state + + :return dict: the state in the JSON form + """ + + def _convert_expiration_mode(value): + # original is a 4-tuple, with the last element being a 2-tuple + # .. convert both to lists + return [ + value[0], + value[1], + value[2], + list(value[3]), + ] + + def _convert_lease_age(value): + # if we're in cycle-to-date, this is a dict + if isinstance(value, dict): + return { + "{},{}".format(k[0], k[1]): v + for k, v in value.items() + } + # otherwise, it's a history-item and they're 3-tuples + return [ + list(v) + for v in value + ] + + converters = { + "configured-expiration-mode": _convert_expiration_mode, + "cycle-start-finish-times": list, + "lease-age-histogram": _convert_lease_age, + "corrupt-shares": lambda value: [ + list(x) + for x in value + ], + "leases-per-share-histogram": lambda value: { + str(k): v + for k, v in value.items() + }, + } + return { + k: converters.get(k, lambda z: z)(v) + for k, v in state.items() + } + + +def _convert_pickle_state_to_json(state): + """ + :param dict state: the pickled state + + :return dict: the state in the JSON form + """ + assert state["version"] == 1, "Only known version is 1" + + converters = { + "cycle-to-date": _convert_cycle_data, + } + return { + k: converters.get(k, lambda x: x)(v) + for k, v in state.items() + } + + +def _upgrade_pickle_to_json(state_path, convert_pickle): + """ + :param FilePath state_path: the filepath to ensure is json + + :param Callable[dict] convert_pickle: function to change + pickle-style state into JSON-style state + + :returns FilePath: the local path where the state is stored + + If this state is pickle, convert to the JSON format and return the + JSON path. + """ + json_state_path = state_path.siblingExtension(".json") + + # if there's no file there at all, we're done because there's + # nothing to upgrade + if not state_path.exists(): + return json_state_path + + # upgrade the pickle data to JSON + import pickle + with state_path.open("rb") as f: + state = pickle.load(f) + new_state = convert_pickle(state) + _dump_json_to_file(new_state, json_state_path) + + # we've written the JSON, delete the pickle + state_path.remove() + return json_state_path + + +def _confirm_json_format(fp): + """ + :param FilePath fp: the original (pickle) name of a state file + + This confirms that we do _not_ have the pickle-version of a + state-file and _do_ either have nothing, or the JSON version. If + the pickle-version exists, an exception is raised. 
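+
+    (``tahoe run`` catches that exception and tells the operator to run
+    ``tahoe admin migrate-crawler``; see scripts/tahoe_run.py.)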
+ + :returns FilePath: the JSON name of a state file + """ + if fp.path.endswith(".json"): + return fp + jsonfp = fp.siblingExtension(".json") + if fp.exists(): + raise MigratePickleFileError(fp) + return jsonfp + + +def _dump_json_to_file(js, afile): + """ + Dump the JSON object `js` to the FilePath `afile` + """ + with afile.open("wb") as f: + data = json.dumps(js) + if PY2: + f.write(data) + else: + f.write(data.encode("utf8")) + + +class _LeaseStateSerializer(object): + """ + Read and write state for LeaseCheckingCrawler. This understands + how to read the legacy pickle format files and upgrade them to the + new JSON format (which will occur automatically). + """ + + def __init__(self, state_path): + self._path = _confirm_json_format(FilePath(state_path)) + + def load(self): + """ + :returns: deserialized JSON state + """ + with self._path.open("rb") as f: + return json.load(f) + + def save(self, data): + """ + Serialize the given data as JSON into the state-path + :returns: None + """ + tmpfile = self._path.siblingExtension(".tmp") + _dump_json_to_file(data, tmpfile) + fileutil.move_into_place(tmpfile.path, self._path.path) + return None + + class ShareCrawler(service.MultiService): """A ShareCrawler subclass is attached to a StorageServer, and periodically walks all of its shares, processing each one in some @@ -90,7 +252,7 @@ class ShareCrawler(service.MultiService): self.allowed_cpu_percentage = allowed_cpu_percentage self.server = server self.sharedir = server.sharedir - self.statefile = statefile + self._state_serializer = _LeaseStateSerializer(statefile) self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2] for i in range(2**10)] if PY3: @@ -213,8 +375,7 @@ class ShareCrawler(service.MultiService): # of the last bucket to be processed, or # None if we are sleeping between cycles try: - with open(self.statefile, "rb") as f: - state = pickle.load(f) + state = self._state_serializer.load() except Exception: state = {"version": 1, "last-cycle-finished": None, @@ -250,12 +411,7 @@ class ShareCrawler(service.MultiService): else: last_complete_prefix = self.prefixes[lcpi] self.state["last-complete-prefix"] = last_complete_prefix - tmpfile = self.statefile + ".tmp" - with open(tmpfile, "wb") as f: - # Newer protocols won't work in Python 2; when it is dropped, - # protocol v4 can be used (added in Python 3.4). - pickle.dump(self.state, f, protocol=2) - fileutil.move_into_place(tmpfile, self.statefile) + self._state_serializer.save(self.get_state()) def startService(self): # arrange things to look like we were just sleeping, so diff --git a/src/allmydata/storage/expirer.py b/src/allmydata/storage/expirer.py index 7c6cd8218..55ab51843 100644 --- a/src/allmydata/storage/expirer.py +++ b/src/allmydata/storage/expirer.py @@ -5,15 +5,69 @@ from __future__ import unicode_literals from future.utils import PY2 if PY2: - # We omit anything that might end up in pickle, just in case. 
- from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, range, str, max, min # noqa: F401 - -import time, os, pickle, struct -from allmydata.storage.crawler import ShareCrawler + from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +import json +import time +import os +import struct +from allmydata.storage.crawler import ( + ShareCrawler, + _confirm_json_format, + _convert_cycle_data, + _dump_json_to_file, +) from allmydata.storage.shares import get_share_file from allmydata.storage.common import UnknownMutableContainerVersionError, \ UnknownImmutableContainerVersionError from twisted.python import log as twlog +from twisted.python.filepath import FilePath + + +def _convert_pickle_state_to_json(state): + """ + Convert a pickle-serialized crawler-history state to the new JSON + format. + + :param dict state: the pickled state + + :return dict: the state in the JSON form + """ + return { + str(k): _convert_cycle_data(v) + for k, v in state.items() + } + + +class _HistorySerializer(object): + """ + Serialize the 'history' file of the lease-crawler state. This is + "storage/lease_checker.history" for the pickle or + "storage/lease_checker.history.json" for the new JSON format. + """ + + def __init__(self, history_path): + self._path = _confirm_json_format(FilePath(history_path)) + + if not self._path.exists(): + _dump_json_to_file({}, self._path) + + def load(self): + """ + Deserialize the existing data. + + :return dict: the existing history state + """ + with self._path.open("rb") as f: + history = json.load(f) + return history + + def save(self, new_history): + """ + Serialize the existing data as JSON. + """ + _dump_json_to_file(new_history, self._path) + return None + class LeaseCheckingCrawler(ShareCrawler): """I examine the leases on all shares, determining which are still valid @@ -63,7 +117,7 @@ class LeaseCheckingCrawler(ShareCrawler): override_lease_duration, # used if expiration_mode=="age" cutoff_date, # used if expiration_mode=="cutoff-date" sharetypes): - self.historyfile = historyfile + self._history_serializer = _HistorySerializer(historyfile) self.expiration_enabled = expiration_enabled self.mode = mode self.override_lease_duration = None @@ -91,14 +145,6 @@ class LeaseCheckingCrawler(ShareCrawler): for k in so_far: self.state["cycle-to-date"].setdefault(k, so_far[k]) - # initialize history - if not os.path.exists(self.historyfile): - history = {} # cyclenum -> dict - with open(self.historyfile, "wb") as f: - # Newer protocols won't work in Python 2; when it is dropped, - # protocol v4 can be used (added in Python 3.4). 
- pickle.dump(history, f, protocol=2) - def create_empty_cycle_dict(self): recovered = self.create_empty_recovered_dict() so_far = {"corrupt-shares": [], @@ -142,7 +188,7 @@ class LeaseCheckingCrawler(ShareCrawler): struct.error): twlog.msg("lease-checker error processing %s" % sharefile) twlog.err() - which = (storage_index_b32, shnum) + which = [storage_index_b32, shnum] self.state["cycle-to-date"]["corrupt-shares"].append(which) wks = (1, 1, 1, "unknown") would_keep_shares.append(wks) @@ -212,7 +258,7 @@ class LeaseCheckingCrawler(ShareCrawler): num_valid_leases_configured += 1 so_far = self.state["cycle-to-date"] - self.increment(so_far["leases-per-share-histogram"], num_leases, 1) + self.increment(so_far["leases-per-share-histogram"], str(num_leases), 1) self.increment_space("examined", s, sharetype) would_keep_share = [1, 1, 1, sharetype] @@ -291,12 +337,14 @@ class LeaseCheckingCrawler(ShareCrawler): start = self.state["current-cycle-start-time"] now = time.time() - h["cycle-start-finish-times"] = (start, now) + h["cycle-start-finish-times"] = [start, now] h["expiration-enabled"] = self.expiration_enabled - h["configured-expiration-mode"] = (self.mode, - self.override_lease_duration, - self.cutoff_date, - self.sharetypes_to_expire) + h["configured-expiration-mode"] = [ + self.mode, + self.override_lease_duration, + self.cutoff_date, + self.sharetypes_to_expire, + ] s = self.state["cycle-to-date"] @@ -314,16 +362,12 @@ class LeaseCheckingCrawler(ShareCrawler): # copy() needs to become a deepcopy h["space-recovered"] = s["space-recovered"].copy() - with open(self.historyfile, "rb") as f: - history = pickle.load(f) - history[cycle] = h + history = self._history_serializer.load() + history[str(cycle)] = h while len(history) > 10: - oldcycles = sorted(history.keys()) - del history[oldcycles[0]] - with open(self.historyfile, "wb") as f: - # Newer protocols won't work in Python 2; when it is dropped, - # protocol v4 can be used (added in Python 3.4). - pickle.dump(history, f, protocol=2) + oldcycles = sorted(int(k) for k in history.keys()) + del history[str(oldcycles[0])] + self._history_serializer.save(history) def get_state(self): """In addition to the crawler state described in @@ -392,9 +436,7 @@ class LeaseCheckingCrawler(ShareCrawler): progress = self.get_progress() state = ShareCrawler.get_state(self) # does a shallow copy - with open(self.historyfile, "rb") as f: - history = pickle.load(f) - state["history"] = history + state["history"] = self._history_serializer.load() if not progress["cycle-in-progress"]: del state["cycle-to-date"] @@ -406,10 +448,12 @@ class LeaseCheckingCrawler(ShareCrawler): lah = so_far["lease-age-histogram"] so_far["lease-age-histogram"] = self.convert_lease_age_histogram(lah) so_far["expiration-enabled"] = self.expiration_enabled - so_far["configured-expiration-mode"] = (self.mode, - self.override_lease_duration, - self.cutoff_date, - self.sharetypes_to_expire) + so_far["configured-expiration-mode"] = [ + self.mode, + self.override_lease_duration, + self.cutoff_date, + self.sharetypes_to_expire, + ] so_far_sr = so_far["space-recovered"] remaining_sr = {} diff --git a/src/allmydata/storage/http_client.py b/src/allmydata/storage/http_client.py new file mode 100644 index 000000000..79bf061c9 --- /dev/null +++ b/src/allmydata/storage/http_client.py @@ -0,0 +1,908 @@ +""" +HTTP client that talks to the HTTP storage server. 
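+
+Requests and responses are CBOR-encoded. Each request is authenticated with a
+swissnum-derived Authorization header, and the server's TLS certificate is
+validated by comparing its SPKI hash against the hash pinned in the NURL,
+rather than through a certificate authority.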
+""" + +from __future__ import annotations + +from typing import Union, Optional, Sequence, Mapping, BinaryIO +from base64 import b64encode +from io import BytesIO +from os import SEEK_END + +from attrs import define, asdict, frozen, field + +# TODO Make sure to import Python version? +from cbor2 import loads, dumps +from pycddl import Schema +from collections_extended import RangeMap +from werkzeug.datastructures import Range, ContentRange +from twisted.web.http_headers import Headers +from twisted.web import http +from twisted.web.iweb import IPolicyForHTTPS +from twisted.internet.defer import inlineCallbacks, returnValue, fail, Deferred, succeed +from twisted.internet.interfaces import ( + IOpenSSLClientConnectionCreator, + IReactorTime, + IDelayedCall, +) +from twisted.internet.ssl import CertificateOptions +from twisted.web.client import Agent, HTTPConnectionPool +from zope.interface import implementer +from hyperlink import DecodedURL +import treq +from treq.client import HTTPClient +from treq.testing import StubTreq +from OpenSSL import SSL +from cryptography.hazmat.bindings.openssl.binding import Binding +from werkzeug.http import parse_content_range_header + +from .http_common import ( + swissnum_auth_header, + Secrets, + get_content_type, + CBOR_MIME_TYPE, + get_spki_hash, +) +from .common import si_b2a +from ..util.hashutil import timing_safe_compare +from ..util.deferredutil import async_to_deferred + +_OPENSSL = Binding().lib + + +def _encode_si(si): # type: (bytes) -> str + """Encode the storage index into Unicode string.""" + return str(si_b2a(si), "ascii") + + +class ClientException(Exception): + """An unexpected response code from the server.""" + + def __init__(self, code, *additional_args): + Exception.__init__(self, code, *additional_args) + self.code = code + + +# Schemas for server responses. +# +# Tags are of the form #6.nnn, where the number is documented at +# https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258 +# indicates a set. +_SCHEMAS = { + "get_version": Schema( + """ + response = {'http://allmydata.org/tahoe/protocols/storage/v1' => { + 'maximum-immutable-share-size' => uint + 'maximum-mutable-share-size' => uint + 'available-space' => uint + 'tolerates-immutable-read-overrun' => bool + 'delete-mutable-shares-with-zero-length-writev' => bool + 'fills-holes-with-zero-bytes' => bool + 'prevents-read-past-end-of-share-data' => bool + } + 'application-version' => bstr + } + """ + ), + "allocate_buckets": Schema( + """ + response = { + already-have: #6.258([0*256 uint]) + allocated: #6.258([0*256 uint]) + } + """ + ), + "immutable_write_share_chunk": Schema( + """ + response = { + required: [0* {begin: uint, end: uint}] + } + """ + ), + "list_shares": Schema( + """ + response = #6.258([0*256 uint]) + """ + ), + "mutable_read_test_write": Schema( + """ + response = { + "success": bool, + "data": {0*256 share_number: [0* bstr]} + } + share_number = uint + """ + ), + "mutable_list_shares": Schema( + """ + response = #6.258([0*256 uint]) + """ + ), +} + + +@define +class _LengthLimitedCollector: + """ + Collect data using ``treq.collect()``, with limited length. 
+ """ + + remaining_length: int + timeout_on_silence: IDelayedCall + f: BytesIO = field(factory=BytesIO) + + def __call__(self, data: bytes): + self.timeout_on_silence.reset(60) + self.remaining_length -= len(data) + if self.remaining_length < 0: + raise ValueError("Response length was too long") + self.f.write(data) + + +def limited_content( + response, + clock: IReactorTime, + max_length: int = 30 * 1024 * 1024, +) -> Deferred[BinaryIO]: + """ + Like ``treq.content()``, but limit data read from the response to a set + length. If the response is longer than the max allowed length, the result + fails with a ``ValueError``. + + A potentially useful future improvement would be using a temporary file to + store the content; since filesystem buffering means that would use memory + for small responses and disk for large responses. + + This will time out if no data is received for 60 seconds; so long as a + trickle of data continues to arrive, it will continue to run. + """ + d = succeed(None) + timeout = clock.callLater(60, d.cancel) + collector = _LengthLimitedCollector(max_length, timeout) + + # Make really sure everything gets called in Deferred context, treq might + # call collector directly... + d.addCallback(lambda _: treq.collect(response, collector)) + + def done(_): + timeout.cancel() + collector.f.seek(0) + return collector.f + + def failed(f): + if timeout.active(): + timeout.cancel() + return f + + return d.addCallbacks(done, failed) + + +@define +class ImmutableCreateResult(object): + """Result of creating a storage index for an immutable.""" + + already_have: set[int] + allocated: set[int] + + +class _TLSContextFactory(CertificateOptions): + """ + Create a context that validates the way Tahoe-LAFS wants to: based on a + pinned certificate hash, rather than a certificate authority. + + Originally implemented as part of Foolscap. To comply with the license, + here's the original licensing terms: + + Copyright (c) 2006-2008 Brian Warner + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + """ + + def __init__(self, expected_spki_hash: bytes): + self.expected_spki_hash = expected_spki_hash + CertificateOptions.__init__(self) + + def getContext(self) -> SSL.Context: + def always_validate(conn, cert, errno, depth, preverify_ok): + # This function is called to validate the certificate received by + # the other end. OpenSSL calls it multiple times, for each errno + # for each certificate. 
+
+            # We do not care about certificate authorities or revocation
+            # lists, we just want to know that the certificate has a valid
+            # signature and follow the chain back to one which is
+            # self-signed. We need to protect against forged signatures, but
+            # not the usual TLS concerns about invalid CAs or revoked
+            # certificates.
+            things_are_ok = (
+                _OPENSSL.X509_V_OK,
+                _OPENSSL.X509_V_ERR_CERT_NOT_YET_VALID,
+                _OPENSSL.X509_V_ERR_CERT_HAS_EXPIRED,
+                _OPENSSL.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT,
+                _OPENSSL.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN,
+            )
+            # TODO can we do this once instead of multiple times?
+            if errno in things_are_ok and timing_safe_compare(
+                get_spki_hash(cert.to_cryptography()), self.expected_spki_hash
+            ):
+                return 1
+            # TODO: log the details of the error, because otherwise they get
+            # lost in the PyOpenSSL exception that will eventually be raised
+            # (possibly OpenSSL.SSL.Error: certificate verify failed)
+            return 0
+
+        ctx = CertificateOptions.getContext(self)
+
+        # VERIFY_PEER means we ask the other end for their certificate.
+        ctx.set_verify(SSL.VERIFY_PEER, always_validate)
+        return ctx
+
+
+@implementer(IPolicyForHTTPS)
+@implementer(IOpenSSLClientConnectionCreator)
+@define
+class _StorageClientHTTPSPolicy:
+    """
+    An HTTPS policy that ensures the SPKI hash of the public key matches a known
+    hash, i.e. pinning-based validation.
+    """
+
+    expected_spki_hash: bytes
+
+    # IPolicyForHTTPS
+    def creatorForNetloc(self, hostname, port):
+        return self
+
+    # IOpenSSLClientConnectionCreator
+    def clientConnectionForTLS(self, tlsProtocol):
+        return SSL.Connection(
+            _TLSContextFactory(self.expected_spki_hash).getContext(), None
+        )
+
+
+@define(hash=True)
+class StorageClient(object):
+    """
+    Low-level HTTP client that talks to the HTTP storage server.
+    """
+
+    # If set, we're doing unit testing and we should call this with
+    # HTTPConnectionPool we create.
+    TEST_MODE_REGISTER_HTTP_POOL = None
+
+    @classmethod
+    def start_test_mode(cls, callback):
+        """Switch to testing mode.
+
+        In testing mode we register the pool with the test system using the
+        given callback so it can Do Things, most notably killing off idle HTTP
+        connections at test shutdown and, in some tests, in the middle of the
+        test.
+        """
+        cls.TEST_MODE_REGISTER_HTTP_POOL = callback
+
+    @classmethod
+    def stop_test_mode(cls):
+        """Stop testing mode."""
+        cls.TEST_MODE_REGISTER_HTTP_POOL = None
+
+    # The URL is an HTTPS URL ("https://..."). To construct from a NURL, use
+    # ``StorageClient.from_nurl()``.
+    _base_url: DecodedURL
+    _swissnum: bytes
+    _treq: Union[treq, StubTreq, HTTPClient]
+    _clock: IReactorTime
+
+    @classmethod
+    def from_nurl(
+        cls,
+        nurl: DecodedURL,
+        reactor,
+    ) -> StorageClient:
+        """
+        Create a ``StorageClient`` for the given NURL.
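+
+        A NURL for an HTTP storage server looks roughly like
+        ``pb://<SPKI-hash>@<hostname>:<port>/<swissnum>#v=1``; this shape is
+        only illustrative, inferred from the parsing below rather than a
+        format specification.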
+ """ + assert nurl.fragment == "v=1" + assert nurl.scheme == "pb" + swissnum = nurl.path[0].encode("ascii") + certificate_hash = nurl.user.encode("ascii") + pool = HTTPConnectionPool(reactor) + + if cls.TEST_MODE_REGISTER_HTTP_POOL is not None: + cls.TEST_MODE_REGISTER_HTTP_POOL(pool) + + treq_client = HTTPClient( + Agent( + reactor, + _StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash), + pool=pool, + ) + ) + + https_url = DecodedURL().replace(scheme="https", host=nurl.host, port=nurl.port) + return cls(https_url, swissnum, treq_client, reactor) + + def relative_url(self, path): + """Get a URL relative to the base URL.""" + return self._base_url.click(path) + + def _get_headers(self, headers): # type: (Optional[Headers]) -> Headers + """Return the basic headers to be used by default.""" + if headers is None: + headers = Headers() + headers.addRawHeader( + "Authorization", + swissnum_auth_header(self._swissnum), + ) + return headers + + def request( + self, + method, + url, + lease_renew_secret=None, + lease_cancel_secret=None, + upload_secret=None, + write_enabler_secret=None, + headers=None, + message_to_serialize=None, + timeout: float = 60, + **kwargs, + ): + """ + Like ``treq.request()``, but with optional secrets that get translated + into corresponding HTTP headers. + + If ``message_to_serialize`` is set, it will be serialized (by default + with CBOR) and set as the request body. + + Default timeout is 60 seconds. + """ + headers = self._get_headers(headers) + + # Add secrets: + for secret, value in [ + (Secrets.LEASE_RENEW, lease_renew_secret), + (Secrets.LEASE_CANCEL, lease_cancel_secret), + (Secrets.UPLOAD, upload_secret), + (Secrets.WRITE_ENABLER, write_enabler_secret), + ]: + if value is None: + continue + headers.addRawHeader( + "X-Tahoe-Authorization", + b"%s %s" % (secret.value.encode("ascii"), b64encode(value).strip()), + ) + + # Note we can accept CBOR: + headers.addRawHeader("Accept", CBOR_MIME_TYPE) + + # If there's a request message, serialize it and set the Content-Type + # header: + if message_to_serialize is not None: + if "data" in kwargs: + raise TypeError( + "Can't use both `message_to_serialize` and `data` " + "as keyword arguments at the same time" + ) + kwargs["data"] = dumps(message_to_serialize) + headers.addRawHeader("Content-Type", CBOR_MIME_TYPE) + + return self._treq.request( + method, url, headers=headers, timeout=timeout, **kwargs + ) + + def decode_cbor(self, response, schema: Schema): + """Given HTTP response, return decoded CBOR body.""" + + def got_content(f: BinaryIO): + data = f.read() + schema.validate_cbor(data) + return loads(data) + + if response.code > 199 and response.code < 300: + content_type = get_content_type(response.headers) + if content_type == CBOR_MIME_TYPE: + return limited_content(response, self._clock).addCallback(got_content) + else: + raise ClientException(-1, "Server didn't send CBOR") + else: + return treq.content(response).addCallback( + lambda data: fail(ClientException(response.code, response.phrase, data)) + ) + + +@define(hash=True) +class StorageClientGeneral(object): + """ + High-level HTTP APIs that aren't immutable- or mutable-specific. + """ + + _client: StorageClient + + @inlineCallbacks + def get_version(self): + """ + Return the version metadata for the server. 
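+
+        The decoded result is a mapping with an
+        ``http://allmydata.org/tahoe/protocols/storage/v1`` entry and an
+        ``application-version`` entry, as described by the ``get_version``
+        schema above.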
+        """
+        url = self._client.relative_url("/storage/v1/version")
+        response = yield self._client.request("GET", url)
+        decoded_response = yield self._client.decode_cbor(
+            response, _SCHEMAS["get_version"]
+        )
+        returnValue(decoded_response)
+
+    @inlineCallbacks
+    def add_or_renew_lease(
+        self, storage_index: bytes, renew_secret: bytes, cancel_secret: bytes
+    ) -> Deferred[None]:
+        """
+        Add or renew a lease.
+
+        If the renewal secret matches an existing lease, it is renewed.
+        Otherwise a new lease is added.
+        """
+        url = self._client.relative_url(
+            "/storage/v1/lease/{}".format(_encode_si(storage_index))
+        )
+        response = yield self._client.request(
+            "PUT",
+            url,
+            lease_renew_secret=renew_secret,
+            lease_cancel_secret=cancel_secret,
+        )
+
+        if response.code == http.NO_CONTENT:
+            return
+        else:
+            raise ClientException(response.code)
+
+
+@define
+class UploadProgress(object):
+    """
+    Progress of immutable upload, per the server.
+    """
+
+    # True when upload has finished.
+    finished: bool
+    # Remaining ranges to upload.
+    required: RangeMap
+
+
+@inlineCallbacks
+def read_share_chunk(
+    client: StorageClient,
+    share_type: str,
+    storage_index: bytes,
+    share_number: int,
+    offset: int,
+    length: int,
+) -> Deferred[bytes]:
+    """
+    Download a chunk of data from a share.
+
+    TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 Failed downloads
+    should be transparently retried and redownloaded by the implementation a
+    few times so that if a failure percolates up, the caller can assume the
+    failure isn't a short-term blip.
+
+    NOTE: the underlying HTTP protocol is somewhat more flexible than this API,
+    insofar as it doesn't always require a range. In practice a range is
+    always provided by the current callers.
+    """
+    url = client.relative_url(
+        "/storage/v1/{}/{}/{}".format(
+            share_type, _encode_si(storage_index), share_number
+        )
+    )
+    # The default 60 second timeout is for getting the response, so it doesn't
+    # include the time it takes to download the body... so we will deal with
+    # that later, via limited_content().
+    response = yield client.request(
+        "GET",
+        url,
+        headers=Headers(
+            # Ranges in HTTP are _inclusive_, Python's convention is exclusive,
+            # but the Range constructor does the conversion for us.
+            {"range": [Range("bytes", [(offset, offset + length)]).to_header()]}
+        ),
+        unbuffered=True,  # Don't buffer the response in memory.
+    )
+
+    if response.code == http.NO_CONTENT:
+        return b""
+
+    if response.code == http.PARTIAL_CONTENT:
+        content_range = parse_content_range_header(
+            response.headers.getRawHeaders("content-range")[0] or ""
+        )
+        if (
+            content_range is None
+            or content_range.stop is None
+            or content_range.start is None
+        ):
+            raise ValueError(
+                "Content-Range was missing, invalid, or in format we don't support"
+            )
+        supposed_length = content_range.stop - content_range.start
+        if supposed_length > length:
+            raise ValueError("Server sent more than we asked for?!")
+        # It might also send less than we asked for. That's (probably) OK, e.g.
+        # if we went past the end of the file.
+        body = yield limited_content(response, client._clock, supposed_length)
+        body.seek(0, SEEK_END)
+        actual_length = body.tell()
+        if actual_length != supposed_length:
+            # Most likely a mutable that got changed out from under us, but
+            # conceivably could be a bug...
+ raise ValueError( + f"Length of response sent from server ({actual_length}) " + + f"didn't match Content-Range header ({supposed_length})" + ) + body.seek(0) + return body.read() + else: + # Technically HTTP allows sending an OK with full body under these + # circumstances, but the server is not designed to do that so we ignore + # that possibility for now... + raise ClientException(response.code) + + +@async_to_deferred +async def advise_corrupt_share( + client: StorageClient, + share_type: str, + storage_index: bytes, + share_number: int, + reason: str, +): + assert isinstance(reason, str) + url = client.relative_url( + "/storage/v1/{}/{}/{}/corrupt".format( + share_type, _encode_si(storage_index), share_number + ) + ) + message = {"reason": reason} + response = await client.request("POST", url, message_to_serialize=message) + if response.code == http.OK: + return + else: + raise ClientException( + response.code, + ) + + +@define(hash=True) +class StorageClientImmutables(object): + """ + APIs for interacting with immutables. + """ + + _client: StorageClient + + @inlineCallbacks + def create( + self, + storage_index, + share_numbers, + allocated_size, + upload_secret, + lease_renew_secret, + lease_cancel_secret, + ): # type: (bytes, set[int], int, bytes, bytes, bytes) -> Deferred[ImmutableCreateResult] + """ + Create a new storage index for an immutable. + + TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 retry + internally on failure, to ensure the operation fully succeeded. If + sufficient number of failures occurred, the result may fire with an + error, but there's no expectation that user code needs to have a + recovery codepath; it will most likely just report an error to the + user. + + Result fires when creating the storage index succeeded, if creating the + storage index failed the result will fire with an exception. + """ + url = self._client.relative_url( + "/storage/v1/immutable/" + _encode_si(storage_index) + ) + message = {"share-numbers": share_numbers, "allocated-size": allocated_size} + + response = yield self._client.request( + "POST", + url, + lease_renew_secret=lease_renew_secret, + lease_cancel_secret=lease_cancel_secret, + upload_secret=upload_secret, + message_to_serialize=message, + ) + decoded_response = yield self._client.decode_cbor( + response, _SCHEMAS["allocate_buckets"] + ) + returnValue( + ImmutableCreateResult( + already_have=decoded_response["already-have"], + allocated=decoded_response["allocated"], + ) + ) + + @inlineCallbacks + def abort_upload( + self, storage_index: bytes, share_number: int, upload_secret: bytes + ) -> Deferred[None]: + """Abort the upload.""" + url = self._client.relative_url( + "/storage/v1/immutable/{}/{}/abort".format( + _encode_si(storage_index), share_number + ) + ) + response = yield self._client.request( + "PUT", + url, + upload_secret=upload_secret, + ) + + if response.code == http.OK: + return + else: + raise ClientException( + response.code, + ) + + @inlineCallbacks + def write_share_chunk( + self, storage_index, share_number, upload_secret, offset, data + ): # type: (bytes, int, bytes, int, bytes) -> Deferred[UploadProgress] + """ + Upload a chunk of data for a specific share. + + TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 The + implementation should retry failed uploads transparently a number of + times, so that if a failure percolates up, the caller can assume the + failure isn't a short-term blip. 
+ + Result fires when the upload succeeded, with a boolean indicating + whether the _complete_ share (i.e. all chunks, not just this one) has + been uploaded. + """ + url = self._client.relative_url( + "/storage/v1/immutable/{}/{}".format( + _encode_si(storage_index), share_number + ) + ) + response = yield self._client.request( + "PATCH", + url, + upload_secret=upload_secret, + data=data, + headers=Headers( + { + "content-range": [ + ContentRange("bytes", offset, offset + len(data)).to_header() + ] + } + ), + ) + + if response.code == http.OK: + # Upload is still unfinished. + finished = False + elif response.code == http.CREATED: + # Upload is done! + finished = True + else: + raise ClientException( + response.code, + ) + body = yield self._client.decode_cbor( + response, _SCHEMAS["immutable_write_share_chunk"] + ) + remaining = RangeMap() + for chunk in body["required"]: + remaining.set(True, chunk["begin"], chunk["end"]) + returnValue(UploadProgress(finished=finished, required=remaining)) + + def read_share_chunk( + self, storage_index, share_number, offset, length + ): # type: (bytes, int, int, int) -> Deferred[bytes] + """ + Download a chunk of data from a share. + """ + return read_share_chunk( + self._client, "immutable", storage_index, share_number, offset, length + ) + + @inlineCallbacks + def list_shares(self, storage_index: bytes) -> Deferred[set[int]]: + """ + Return the set of shares for a given storage index. + """ + url = self._client.relative_url( + "/storage/v1/immutable/{}/shares".format(_encode_si(storage_index)) + ) + response = yield self._client.request( + "GET", + url, + ) + if response.code == http.OK: + body = yield self._client.decode_cbor(response, _SCHEMAS["list_shares"]) + returnValue(set(body)) + else: + raise ClientException(response.code) + + def advise_corrupt_share( + self, + storage_index: bytes, + share_number: int, + reason: str, + ): + """Indicate a share has been corrupted, with a human-readable message.""" + return advise_corrupt_share( + self._client, "immutable", storage_index, share_number, reason + ) + + +@frozen +class WriteVector: + """Data to write to a chunk.""" + + offset: int + data: bytes + + +@frozen +class TestVector: + """Checks to make on a chunk before writing to it.""" + + offset: int + size: int + specimen: bytes + + +@frozen +class ReadVector: + """ + Reads to do on chunks, as part of a read/test/write operation. + """ + + offset: int + size: int + + +@frozen +class TestWriteVectors: + """Test and write vectors for a specific share.""" + + test_vectors: Sequence[TestVector] = field(factory=list) + write_vectors: Sequence[WriteVector] = field(factory=list) + new_length: Optional[int] = None + + def asdict(self) -> dict: + """Return dictionary suitable for sending over CBOR.""" + d = asdict(self) + d["test"] = d.pop("test_vectors") + d["write"] = d.pop("write_vectors") + d["new-length"] = d.pop("new_length") + return d + + +@frozen +class ReadTestWriteResult: + """Result of sending read-test-write vectors.""" + + success: bool + # Map share numbers to reads corresponding to the request's list of + # ReadVectors: + reads: Mapping[int, Sequence[bytes]] + + +@frozen +class StorageClientMutables: + """ + APIs for interacting with mutables. 
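+
+    ``read_test_write_chunks`` wraps the server's read/test/write endpoint;
+    ``read_share_chunk`` and ``list_shares`` mirror the corresponding
+    immutable APIs.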
+    """
+
+    _client: StorageClient
+
+    @async_to_deferred
+    async def read_test_write_chunks(
+        self,
+        storage_index: bytes,
+        write_enabler_secret: bytes,
+        lease_renew_secret: bytes,
+        lease_cancel_secret: bytes,
+        testwrite_vectors: dict[int, TestWriteVectors],
+        read_vector: list[ReadVector],
+    ) -> ReadTestWriteResult:
+        """
+        Read, test, and possibly write chunks to a particular mutable storage
+        index.
+
+        Reads are done before writes.
+
+        Given a mapping between share numbers and test/write vectors, the tests
+        are done and, if they are valid, the writes are done.
+        """
+        url = self._client.relative_url(
+            "/storage/v1/mutable/{}/read-test-write".format(_encode_si(storage_index))
+        )
+        message = {
+            "test-write-vectors": {
+                share_number: twv.asdict()
+                for (share_number, twv) in testwrite_vectors.items()
+            },
+            "read-vector": [asdict(r) for r in read_vector],
+        }
+        response = await self._client.request(
+            "POST",
+            url,
+            write_enabler_secret=write_enabler_secret,
+            lease_renew_secret=lease_renew_secret,
+            lease_cancel_secret=lease_cancel_secret,
+            message_to_serialize=message,
+        )
+        if response.code == http.OK:
+            result = await self._client.decode_cbor(
+                response, _SCHEMAS["mutable_read_test_write"]
+            )
+            return ReadTestWriteResult(success=result["success"], reads=result["data"])
+        else:
+            raise ClientException(response.code, (await response.content()))
+
+    def read_share_chunk(
+        self,
+        storage_index: bytes,
+        share_number: int,
+        offset: int,
+        length: int,
+    ) -> Deferred[bytes]:
+        """
+        Download a chunk of data from a share.
+        """
+        return read_share_chunk(
+            self._client, "mutable", storage_index, share_number, offset, length
+        )
+
+    @async_to_deferred
+    async def list_shares(self, storage_index: bytes) -> set[int]:
+        """
+        List the share numbers for a given storage index.
+        """
+        url = self._client.relative_url(
+            "/storage/v1/mutable/{}/shares".format(_encode_si(storage_index))
+        )
+        response = await self._client.request("GET", url)
+        if response.code == http.OK:
+            return await self._client.decode_cbor(
+                response, _SCHEMAS["mutable_list_shares"]
+            )
+        else:
+            raise ClientException(response.code)
+
+    def advise_corrupt_share(
+        self,
+        storage_index: bytes,
+        share_number: int,
+        reason: str,
+    ):
+        """Indicate a share has been corrupted, with a human-readable message."""
+        return advise_corrupt_share(
+            self._client, "mutable", storage_index, share_number, reason
+        )
diff --git a/src/allmydata/storage/http_common.py b/src/allmydata/storage/http_common.py
new file mode 100644
index 000000000..123ce403b
--- /dev/null
+++ b/src/allmydata/storage/http_common.py
@@ -0,0 +1,54 @@
+"""
+Common HTTP infrastructure for the storage server.
+"""
+
+from enum import Enum
+from base64 import urlsafe_b64encode, b64encode
+from hashlib import sha256
+from typing import Optional
+
+from cryptography.x509 import Certificate
+from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
+
+from werkzeug.http import parse_options_header
+from twisted.web.http_headers import Headers
+
+CBOR_MIME_TYPE = "application/cbor"
+
+
+def get_content_type(headers: Headers) -> Optional[str]:
+    """
+    Get the content type from the HTTP ``Content-Type`` header.
+
+    Returns ``None`` if no content-type was set.
+ """ + values = headers.getRawHeaders("content-type") or [None] + content_type = parse_options_header(values[0])[0] or None + return content_type + + +def swissnum_auth_header(swissnum: bytes) -> bytes: + """Return value for ``Authentication`` header.""" + return b"Tahoe-LAFS " + b64encode(swissnum).strip() + + +class Secrets(Enum): + """Different kinds of secrets the client may send.""" + + LEASE_RENEW = "lease-renew-secret" + LEASE_CANCEL = "lease-cancel-secret" + UPLOAD = "upload-secret" + WRITE_ENABLER = "write-enabler" + + +def get_spki_hash(certificate: Certificate) -> bytes: + """ + Get the public key hash, as per RFC 7469: base64 of sha256 of the public + key encoded in DER + Subject Public Key Info format. + + We use the URL-safe base64 variant, since this is typically found in NURLs. + """ + public_key_bytes = certificate.public_key().public_bytes( + Encoding.DER, PublicFormat.SubjectPublicKeyInfo + ) + return urlsafe_b64encode(sha256(public_key_bytes).digest()).strip().rstrip(b"=") diff --git a/src/allmydata/storage/http_server.py b/src/allmydata/storage/http_server.py new file mode 100644 index 000000000..3902976ba --- /dev/null +++ b/src/allmydata/storage/http_server.py @@ -0,0 +1,923 @@ +""" +HTTP server for storage. +""" + +from __future__ import annotations + +from typing import Dict, List, Set, Tuple, Any, Callable, Union, cast +from functools import wraps +from base64 import b64decode +import binascii +from tempfile import TemporaryFile + +from cryptography.x509 import Certificate as CryptoCertificate +from zope.interface import implementer +from klein import Klein +from twisted.web import http +from twisted.internet.interfaces import ( + IListeningPort, + IStreamServerEndpoint, + IPullProducer, +) +from twisted.internet.address import IPv4Address, IPv6Address +from twisted.internet.defer import Deferred +from twisted.internet.ssl import CertificateOptions, Certificate, PrivateCertificate +from twisted.web.server import Site, Request +from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.python.filepath import FilePath + +from attrs import define, field, Factory +from werkzeug.http import ( + parse_range_header, + parse_content_range_header, + parse_accept_header, +) +from werkzeug.routing import BaseConverter, ValidationError +from werkzeug.datastructures import ContentRange +from hyperlink import DecodedURL +from cryptography.x509 import load_pem_x509_certificate + + +# TODO Make sure to use pure Python versions? +from cbor2 import dump, loads +from pycddl import Schema, ValidationError as CDDLValidationError +from .server import StorageServer +from .http_common import ( + swissnum_auth_header, + Secrets, + get_content_type, + CBOR_MIME_TYPE, + get_spki_hash, +) + +from .common import si_a2b +from .immutable import BucketWriter, ConflictingWriteError +from ..util.hashutil import timing_safe_compare +from ..util.base32 import rfc3548_alphabet +from allmydata.interfaces import BadWriteEnablerError + + +class ClientSecretsException(Exception): + """The client did not send the appropriate secrets.""" + + +def _extract_secrets( + header_values, required_secrets +): # type: (List[str], Set[Secrets]) -> Dict[Secrets, bytes] + """ + Given list of values of ``X-Tahoe-Authorization`` headers, and required + secrets, return dictionary mapping secrets to decoded values. + + If too few secrets were given, or too many, a ``ClientSecretsException`` is + raised. 
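+
+    A single header value looks like ``lease-renew-secret <base64-encoded
+    bytes>`` (illustrative): the secret kind, a space, and then the
+    base64-encoded secret value.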
+ """ + string_key_to_enum = {e.value: e for e in Secrets} + result = {} + try: + for header_value in header_values: + string_key, string_value = header_value.strip().split(" ", 1) + key = string_key_to_enum[string_key] + value = b64decode(string_value) + if key in (Secrets.LEASE_CANCEL, Secrets.LEASE_RENEW) and len(value) != 32: + raise ClientSecretsException("Lease secrets must be 32 bytes long") + result[key] = value + except (ValueError, KeyError): + raise ClientSecretsException("Bad header value(s): {}".format(header_values)) + if result.keys() != required_secrets: + raise ClientSecretsException( + "Expected {} secrets, got {}".format(required_secrets, result.keys()) + ) + return result + + +def _authorization_decorator(required_secrets): + """ + Check the ``Authorization`` header, and extract ``X-Tahoe-Authorization`` + headers and pass them in. + """ + + def decorator(f): + @wraps(f) + def route(self, request, *args, **kwargs): + if not timing_safe_compare( + request.requestHeaders.getRawHeaders("Authorization", [None])[0].encode( + "utf-8" + ), + swissnum_auth_header(self._swissnum), + ): + request.setResponseCode(http.UNAUTHORIZED) + return b"" + authorization = request.requestHeaders.getRawHeaders( + "X-Tahoe-Authorization", [] + ) + try: + secrets = _extract_secrets(authorization, required_secrets) + except ClientSecretsException: + request.setResponseCode(http.BAD_REQUEST) + return b"Missing required secrets" + return f(self, request, secrets, *args, **kwargs) + + return route + + return decorator + + +def _authorized_route(app, required_secrets, *route_args, **route_kwargs): + """ + Like Klein's @route, but with additional support for checking the + ``Authorization`` header as well as ``X-Tahoe-Authorization`` headers. The + latter will get passed in as second argument to wrapped functions, a + dictionary mapping a ``Secret`` value to the uploaded secret. + + :param required_secrets: Set of required ``Secret`` types. + """ + + def decorator(f): + @app.route(*route_args, **route_kwargs) + @_authorization_decorator(required_secrets) + @wraps(f) + def handle_route(*args, **kwargs): + return f(*args, **kwargs) + + return handle_route + + return decorator + + +@define +class StorageIndexUploads(object): + """ + In-progress upload to storage index. + """ + + # Map share number to BucketWriter + shares: dict[int, BucketWriter] = Factory(dict) + + # Map share number to the upload secret (different shares might have + # different upload secrets). + upload_secrets: dict[int, bytes] = Factory(dict) + + +@define +class UploadsInProgress(object): + """ + Keep track of uploads for storage indexes. 
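+
+    Uploads are indexed both by storage index and by ``BucketWriter`` so that
+    ``remove_write_bucket`` can clean up when the storage server reports a
+    bucket as closed.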
+ """ + + # Map storage index to corresponding uploads-in-progress + _uploads: dict[bytes, StorageIndexUploads] = Factory(dict) + + # Map BucketWriter to (storage index, share number) + _bucketwriters: dict[BucketWriter, Tuple[bytes, int]] = Factory(dict) + + def add_write_bucket( + self, + storage_index: bytes, + share_number: int, + upload_secret: bytes, + bucket: BucketWriter, + ): + """Add a new ``BucketWriter`` to be tracked.""" + si_uploads = self._uploads.setdefault(storage_index, StorageIndexUploads()) + si_uploads.shares[share_number] = bucket + si_uploads.upload_secrets[share_number] = upload_secret + self._bucketwriters[bucket] = (storage_index, share_number) + + def get_write_bucket( + self, storage_index: bytes, share_number: int, upload_secret: bytes + ) -> BucketWriter: + """Get the given in-progress immutable share upload.""" + self.validate_upload_secret(storage_index, share_number, upload_secret) + try: + return self._uploads[storage_index].shares[share_number] + except (KeyError, IndexError): + raise _HTTPError(http.NOT_FOUND) + + def remove_write_bucket(self, bucket: BucketWriter): + """Stop tracking the given ``BucketWriter``.""" + try: + storage_index, share_number = self._bucketwriters.pop(bucket) + except KeyError: + # This is probably a BucketWriter created by Foolscap, so just + # ignore it. + return + uploads_index = self._uploads[storage_index] + uploads_index.shares.pop(share_number) + uploads_index.upload_secrets.pop(share_number) + if not uploads_index.shares: + self._uploads.pop(storage_index) + + def validate_upload_secret( + self, storage_index: bytes, share_number: int, upload_secret: bytes + ): + """ + Raise an unauthorized-HTTP-response exception if the given + storage_index+share_number have a different upload secret than the + given one. + + If the given upload doesn't exist at all, nothing happens. + """ + if storage_index in self._uploads: + in_progress = self._uploads[storage_index] + # For pre-existing upload, make sure password matches. + if share_number in in_progress.upload_secrets and not timing_safe_compare( + in_progress.upload_secrets[share_number], upload_secret + ): + raise _HTTPError(http.UNAUTHORIZED) + + +class StorageIndexConverter(BaseConverter): + """Parser/validator for storage index URL path segments.""" + + regex = "[" + str(rfc3548_alphabet, "ascii") + "]{26}" + + def to_python(self, value): + try: + return si_a2b(value.encode("ascii")) + except (AssertionError, binascii.Error, ValueError): + raise ValidationError("Invalid storage index") + + +class _HTTPError(Exception): + """ + Raise from ``HTTPServer`` endpoint to return the given HTTP response code. + """ + + def __init__(self, code: int): + self.code = code + + +# CDDL schemas. +# +# Tags are of the form #6.nnn, where the number is documented at +# https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258 +# indicates a set. +# +# Somewhat arbitrary limits are set to reduce e.g. number of shares, number of +# vectors, etc.. These may need to be iterated on in future revisions of the +# code. 
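+#
+# Request bodies are validated against these schemas by ``_read_encoded``
+# before being passed on to the storage server.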
+_SCHEMAS = { + "allocate_buckets": Schema( + """ + request = { + share-numbers: #6.258([0*256 uint]) + allocated-size: uint + } + """ + ), + "advise_corrupt_share": Schema( + """ + request = { + reason: tstr + } + """ + ), + "mutable_read_test_write": Schema( + """ + request = { + "test-write-vectors": { + 0*256 share_number : { + "test": [0*30 {"offset": uint, "size": uint, "specimen": bstr}] + "write": [0*30 {"offset": uint, "data": bstr}] + "new-length": uint / null + } + } + "read-vector": [0*30 {"offset": uint, "size": uint}] + } + share_number = uint + """ + ), +} + + +# Callable that takes offset and length, returns the data at that range. +ReadData = Callable[[int, int], bytes] + + +@implementer(IPullProducer) +@define +class _ReadAllProducer: + """ + Producer that calls a read function repeatedly to read all the data, and + writes to a request. + """ + + request: Request + read_data: ReadData + result: Deferred = Factory(Deferred) + start: int = field(default=0) + + @classmethod + def produce_to(cls, request: Request, read_data: ReadData) -> Deferred: + """ + Create and register the producer, returning ``Deferred`` that should be + returned from a HTTP server endpoint. + """ + producer = cls(request, read_data) + request.registerProducer(producer, False) + return producer.result + + def resumeProducing(self): + data = self.read_data(self.start, 65536) + if not data: + self.request.unregisterProducer() + d = self.result + del self.result + d.callback(b"") + return + self.request.write(data) + self.start += len(data) + + def pauseProducing(self): + pass + + def stopProducing(self): + pass + + +@implementer(IPullProducer) +@define +class _ReadRangeProducer: + """ + Producer that calls a read function to read a range of data, and writes to + a request. + """ + + request: Request + read_data: ReadData + result: Deferred + start: int + remaining: int + + def resumeProducing(self): + to_read = min(self.remaining, 65536) + data = self.read_data(self.start, to_read) + assert len(data) <= to_read + + if not data and self.remaining > 0: + d, self.result = self.result, None + d.errback( + ValueError( + f"Should be {self.remaining} bytes left, but we got an empty read" + ) + ) + self.stopProducing() + return + + if len(data) > self.remaining: + d, self.result = self.result, None + d.errback( + ValueError( + f"Should be {self.remaining} bytes left, but we got more than that ({len(data)})!" + ) + ) + self.stopProducing() + return + + self.start += len(data) + self.remaining -= len(data) + assert self.remaining >= 0 + + self.request.write(data) + + if self.remaining == 0: + self.stopProducing() + + def pauseProducing(self): + pass + + def stopProducing(self): + if self.request is not None: + self.request.unregisterProducer() + self.request = None + if self.result is not None: + d = self.result + self.result = None + d.callback(b"") + + +def read_range( + request: Request, read_data: ReadData, share_length: int +) -> Union[Deferred, bytes]: + """ + Read an optional ``Range`` header, reads data appropriately via the given + callable, writes the data to the request. + + Only parses a subset of ``Range`` headers that we support: must be set, + bytes only, only a single range, the end must be explicitly specified. + Raises a ``_HTTPError(http.REQUESTED_RANGE_NOT_SATISFIABLE)`` if parsing is + not possible or the header isn't set. + + Takes a function that will do the actual reading given the start offset and + a length to read. + + The resulting data is written to the request. 
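+
+    For example, ``Range: bytes=0-9`` is accepted, while multi-range requests
+    and open-ended forms such as ``bytes=0-`` are rejected with a 416 response
+    (the example headers are illustrative).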
+    """
+
+    def read_data_with_error_handling(offset: int, length: int) -> bytes:
+        try:
+            return read_data(offset, length)
+        except _HTTPError as e:
+            request.setResponseCode(e.code)
+            # Empty read means we're done.
+            return b""
+
+    if request.getHeader("range") is None:
+        return _ReadAllProducer.produce_to(request, read_data_with_error_handling)
+
+    range_header = parse_range_header(request.getHeader("range"))
+    if (
+        range_header is None  # failed to parse
+        or range_header.units != "bytes"
+        or len(range_header.ranges) > 1  # more than one range
+        or range_header.ranges[0][1] is None  # range without end
+    ):
+        raise _HTTPError(http.REQUESTED_RANGE_NOT_SATISFIABLE)
+
+    offset, end = range_header.ranges[0]
+    # If we're being asked to read beyond the length of the share, just read
+    # less:
+    end = min(end, share_length)
+    if offset >= end:
+        # Basically we'd need to return an empty body. However, the
+        # Content-Range header can't actually represent empty lengths... so
+        # (mis)use 204 response code to indicate that.
+        raise _HTTPError(http.NO_CONTENT)
+
+    request.setResponseCode(http.PARTIAL_CONTENT)
+
+    # Actual conversion from Python's exclusive ranges to inclusive ranges is
+    # handled by werkzeug.
+    request.setHeader(
+        "content-range",
+        ContentRange("bytes", offset, end).to_header(),
+    )
+
+    d = Deferred()
+    request.registerProducer(
+        _ReadRangeProducer(
+            request, read_data_with_error_handling, d, offset, end - offset
+        ),
+        False,
+    )
+    return d
+
+
+class HTTPServer(object):
+    """
+    An HTTP interface to the storage server.
+    """
+
+    _app = Klein()
+    _app.url_map.converters["storage_index"] = StorageIndexConverter
+
+    @_app.handle_errors(_HTTPError)
+    def _http_error(self, request, failure):
+        """Handle ``_HTTPError`` exceptions."""
+        request.setResponseCode(failure.value.code)
+        return b""
+
+    @_app.handle_errors(CDDLValidationError)
+    def _cddl_validation_error(self, request, failure):
+        """Handle CDDL validation errors."""
+        request.setResponseCode(http.BAD_REQUEST)
+        return str(failure.value).encode("utf-8")
+
+    def __init__(
+        self, storage_server, swissnum
+    ):  # type: (StorageServer, bytes) -> None
+        self._storage_server = storage_server
+        self._swissnum = swissnum
+        # Maps storage index to StorageIndexUploads:
+        self._uploads = UploadsInProgress()
+
+        # When an upload finishes successfully, gets aborted, or times out,
+        # make sure it gets removed from our tracking data structure:
+        self._storage_server.register_bucket_writer_close_handler(
+            self._uploads.remove_write_bucket
+        )
+
+    def get_resource(self):
+        """Return twisted.web ``Resource`` for this object."""
+        return self._app.resource()
+
+    def _send_encoded(self, request, data):
+        """
+        Return encoded data suitable for writing as the HTTP body response, by
+        default using CBOR.
+
+        Also sets the appropriate ``Content-Type`` header on the response.
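+
+        The encoded body is spooled to a temporary file and streamed back via
+        ``_ReadAllProducer``, so large responses do not have to be held in
+        memory all at once.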
+ """ + accept_headers = request.requestHeaders.getRawHeaders("accept") or [ + CBOR_MIME_TYPE + ] + accept = parse_accept_header(accept_headers[0]) + if accept.best == CBOR_MIME_TYPE: + request.setHeader("Content-Type", CBOR_MIME_TYPE) + f = TemporaryFile() + dump(data, f) + + def read_data(offset: int, length: int) -> bytes: + f.seek(offset) + return f.read(length) + + return _ReadAllProducer.produce_to(request, read_data) + else: + # TODO Might want to optionally send JSON someday: + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3861 + raise _HTTPError(http.NOT_ACCEPTABLE) + + def _read_encoded(self, request, schema: Schema) -> Any: + """ + Read encoded request body data, decoding it with CBOR by default. + + Somewhat arbitrarily, limit body size to 1MB; this may be too low, we + may want to customize per query type, but this is the starting point + for now. + """ + content_type = get_content_type(request.requestHeaders) + if content_type == CBOR_MIME_TYPE: + # Read 1 byte more than 1MB. We expect length to be 1MB or + # less; if it's more assume it's not a legitimate message. + message = request.content.read(1024 * 1024 + 1) + if len(message) > 1024 * 1024: + raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE) + schema.validate_cbor(message) + result = loads(message) + return result + else: + raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE) + + ##### Generic APIs ##### + + @_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"]) + def version(self, request, authorization): + """Return version information.""" + return self._send_encoded(request, self._storage_server.get_version()) + + ##### Immutable APIs ##### + + @_authorized_route( + _app, + {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL, Secrets.UPLOAD}, + "/storage/v1/immutable/", + methods=["POST"], + ) + def allocate_buckets(self, request, authorization, storage_index): + """Allocate buckets.""" + upload_secret = authorization[Secrets.UPLOAD] + info = self._read_encoded(request, _SCHEMAS["allocate_buckets"]) + + # We do NOT validate the upload secret for existing bucket uploads. + # Another upload may be happening in parallel, with a different upload + # key. That's fine! If a client tries to _write_ to that upload, they + # need to have an upload key. That does mean we leak the existence of + # these parallel uploads, but if you know storage index you can + # download them once upload finishes, so it's not a big deal to leak + # that information. + + already_got, sharenum_to_bucket = self._storage_server.allocate_buckets( + storage_index, + renew_secret=authorization[Secrets.LEASE_RENEW], + cancel_secret=authorization[Secrets.LEASE_CANCEL], + sharenums=info["share-numbers"], + allocated_size=info["allocated-size"], + ) + for share_number, bucket in sharenum_to_bucket.items(): + self._uploads.add_write_bucket( + storage_index, share_number, upload_secret, bucket + ) + + return self._send_encoded( + request, + {"already-have": set(already_got), "allocated": set(sharenum_to_bucket)}, + ) + + @_authorized_route( + _app, + {Secrets.UPLOAD}, + "/storage/v1/immutable///abort", + methods=["PUT"], + ) + def abort_share_upload(self, request, authorization, storage_index, share_number): + """Abort an in-progress immutable share upload.""" + try: + bucket = self._uploads.get_write_bucket( + storage_index, share_number, authorization[Secrets.UPLOAD] + ) + except _HTTPError as e: + if e.code == http.NOT_FOUND: + # It may be we've already uploaded this, in which case error + # should be method not allowed (405). 
+ try: + self._storage_server.get_buckets(storage_index)[share_number] + except KeyError: + pass + else: + # Already uploaded, so we can't abort. + raise _HTTPError(http.NOT_ALLOWED) + raise + + # Abort the upload; this should close it which will eventually result + # in self._uploads.remove_write_bucket() being called. + bucket.abort() + + return b"" + + @_authorized_route( + _app, + {Secrets.UPLOAD}, + "/storage/v1/immutable//", + methods=["PATCH"], + ) + def write_share_data(self, request, authorization, storage_index, share_number): + """Write data to an in-progress immutable upload.""" + content_range = parse_content_range_header(request.getHeader("content-range")) + if content_range is None or content_range.units != "bytes": + request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) + return b"" + + bucket = self._uploads.get_write_bucket( + storage_index, share_number, authorization[Secrets.UPLOAD] + ) + offset = content_range.start + remaining = content_range.stop - content_range.start + finished = False + + while remaining > 0: + data = request.content.read(min(remaining, 65536)) + assert data, "uploaded data length doesn't match range" + + try: + finished = bucket.write(offset, data) + except ConflictingWriteError: + request.setResponseCode(http.CONFLICT) + return b"" + remaining -= len(data) + offset += len(data) + + if finished: + bucket.close() + request.setResponseCode(http.CREATED) + else: + request.setResponseCode(http.OK) + + required = [] + for start, end, _ in bucket.required_ranges().ranges(): + required.append({"begin": start, "end": end}) + return self._send_encoded(request, {"required": required}) + + @_authorized_route( + _app, + set(), + "/storage/v1/immutable//shares", + methods=["GET"], + ) + def list_shares(self, request, authorization, storage_index): + """ + List shares for the given storage index. + """ + share_numbers = set(self._storage_server.get_buckets(storage_index).keys()) + return self._send_encoded(request, share_numbers) + + @_authorized_route( + _app, + set(), + "/storage/v1/immutable//", + methods=["GET"], + ) + def read_share_chunk(self, request, authorization, storage_index, share_number): + """Read a chunk for an already uploaded immutable.""" + try: + bucket = self._storage_server.get_buckets(storage_index)[share_number] + except KeyError: + request.setResponseCode(http.NOT_FOUND) + return b"" + + return read_range(request, bucket.read, bucket.get_length()) + + @_authorized_route( + _app, + {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL}, + "/storage/v1/lease/", + methods=["PUT"], + ) + def add_or_renew_lease(self, request, authorization, storage_index): + """Update the lease for an immutable or mutable share.""" + if not list(self._storage_server.get_shares(storage_index)): + raise _HTTPError(http.NOT_FOUND) + + # Checking of the renewal secret is done by the backend. 
+ self._storage_server.add_lease( + storage_index, + authorization[Secrets.LEASE_RENEW], + authorization[Secrets.LEASE_CANCEL], + ) + + request.setResponseCode(http.NO_CONTENT) + return b"" + + @_authorized_route( + _app, + set(), + "/storage/v1/immutable///corrupt", + methods=["POST"], + ) + def advise_corrupt_share_immutable( + self, request, authorization, storage_index, share_number + ): + """Indicate that given share is corrupt, with a text reason.""" + try: + bucket = self._storage_server.get_buckets(storage_index)[share_number] + except KeyError: + raise _HTTPError(http.NOT_FOUND) + + info = self._read_encoded(request, _SCHEMAS["advise_corrupt_share"]) + bucket.advise_corrupt_share(info["reason"].encode("utf-8")) + return b"" + + ##### Mutable APIs ##### + + @_authorized_route( + _app, + {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL, Secrets.WRITE_ENABLER}, + "/storage/v1/mutable//read-test-write", + methods=["POST"], + ) + def mutable_read_test_write(self, request, authorization, storage_index): + """Read/test/write combined operation for mutables.""" + rtw_request = self._read_encoded(request, _SCHEMAS["mutable_read_test_write"]) + secrets = ( + authorization[Secrets.WRITE_ENABLER], + authorization[Secrets.LEASE_RENEW], + authorization[Secrets.LEASE_CANCEL], + ) + try: + success, read_data = self._storage_server.slot_testv_and_readv_and_writev( + storage_index, + secrets, + { + k: ( + [ + (d["offset"], d["size"], b"eq", d["specimen"]) + for d in v["test"] + ], + [(d["offset"], d["data"]) for d in v["write"]], + v["new-length"], + ) + for (k, v) in rtw_request["test-write-vectors"].items() + }, + [(d["offset"], d["size"]) for d in rtw_request["read-vector"]], + ) + except BadWriteEnablerError: + raise _HTTPError(http.UNAUTHORIZED) + return self._send_encoded(request, {"success": success, "data": read_data}) + + @_authorized_route( + _app, + set(), + "/storage/v1/mutable//", + methods=["GET"], + ) + def read_mutable_chunk(self, request, authorization, storage_index, share_number): + """Read a chunk from a mutable.""" + + try: + share_length = self._storage_server.get_mutable_share_length( + storage_index, share_number + ) + except KeyError: + raise _HTTPError(http.NOT_FOUND) + + def read_data(offset, length): + try: + return self._storage_server.slot_readv( + storage_index, [share_number], [(offset, length)] + )[share_number][0] + except KeyError: + raise _HTTPError(http.NOT_FOUND) + + return read_range(request, read_data, share_length) + + @_authorized_route( + _app, + set(), + "/storage/v1/mutable//shares", + methods=["GET"], + ) + def enumerate_mutable_shares(self, request, authorization, storage_index): + """List mutable shares for a storage index.""" + shares = self._storage_server.enumerate_mutable_shares(storage_index) + return self._send_encoded(request, shares) + + @_authorized_route( + _app, + set(), + "/storage/v1/mutable///corrupt", + methods=["POST"], + ) + def advise_corrupt_share_mutable( + self, request, authorization, storage_index, share_number + ): + """Indicate that given share is corrupt, with a text reason.""" + if share_number not in { + shnum for (shnum, _) in self._storage_server.get_shares(storage_index) + }: + raise _HTTPError(http.NOT_FOUND) + + info = self._read_encoded(request, _SCHEMAS["advise_corrupt_share"]) + self._storage_server.advise_corrupt_share( + b"mutable", storage_index, share_number, info["reason"].encode("utf-8") + ) + return b"" + + +@implementer(IStreamServerEndpoint) +@define +class _TLSEndpointWrapper(object): + """ + Wrap an existing 
endpoint with the server-side storage TLS policy. This is
+    useful because not all Tahoe-LAFS endpoints might be plain TCP+TLS, for
+    example there's Tor and i2p.
+    """
+
+    endpoint: IStreamServerEndpoint
+    context_factory: CertificateOptions
+
+    @classmethod
+    def from_paths(
+        cls, endpoint, private_key_path: FilePath, cert_path: FilePath
+    ) -> "_TLSEndpointWrapper":
+        """
+        Create an endpoint with the given private key and certificate paths on
+        the filesystem.
+        """
+        certificate = Certificate.loadPEM(cert_path.getContent()).original
+        private_key = PrivateCertificate.loadPEM(
+            cert_path.getContent() + b"\n" + private_key_path.getContent()
+        ).privateKey.original
+        certificate_options = CertificateOptions(
+            privateKey=private_key, certificate=certificate
+        )
+        return cls(endpoint=endpoint, context_factory=certificate_options)
+
+    def listen(self, factory):
+        return self.endpoint.listen(
+            TLSMemoryBIOFactory(self.context_factory, False, factory)
+        )
+
+
+def build_nurl(
+    hostname: str, port: int, swissnum: str, certificate: CryptoCertificate
+) -> DecodedURL:
+    """
+    Construct an HTTPS NURL, given the hostname, port, server swissnum, and x509
+    certificate for the server. Clients can then connect to the server using
+    this NURL.
+    """
+    return DecodedURL().replace(
+        fragment="v=1",  # how we know this NURL is HTTP-based (i.e. not Foolscap)
+        host=hostname,
+        port=port,
+        path=(swissnum,),
+        userinfo=(
+            str(
+                get_spki_hash(certificate),
+                "ascii",
+            ),
+        ),
+        scheme="pb",
+    )
+
+
+def listen_tls(
+    server: HTTPServer,
+    hostname: str,
+    endpoint: IStreamServerEndpoint,
+    private_key_path: FilePath,
+    cert_path: FilePath,
+) -> Deferred[Tuple[DecodedURL, IListeningPort]]:
+    """
+    Start an HTTPS storage server on the given port, return the NURL and the
+    listening port.
+
+    The hostname is the external IP or hostname clients will connect to, used
+    to construct the NURL; it does not modify what interfaces the server
+    listens on.
+
+    This will likely need to be updated eventually to handle Tor/i2p.
+    """
+    endpoint = _TLSEndpointWrapper.from_paths(endpoint, private_key_path, cert_path)
+
+    def get_nurl(listening_port: IListeningPort) -> DecodedURL:
+        address = cast(Union[IPv4Address, IPv6Address], listening_port.getHost())
+        return build_nurl(
+            hostname,
+            address.port,
+            str(server._swissnum, "ascii"),
+            load_pem_x509_certificate(cert_path.getContent()),
+        )
+
+    return endpoint.listen(Site(server.get_resource())).addCallback(
+        lambda listening_port: (get_nurl(listening_port), listening_port)
+    )
diff --git a/src/allmydata/storage/immutable.py b/src/allmydata/storage/immutable.py
index b8b18f140..0893513ae 100644
--- a/src/allmydata/storage/immutable.py
+++ b/src/allmydata/storage/immutable.py
@@ -21,27 +21,32 @@ from zope.interface import implementer
 from allmydata.interfaces import (
     RIBucketWriter, RIBucketReader, ConflictingWriteError,
     DataTooLargeError,
+    NoSpace,
 )
 from allmydata.util import base32, fileutil, log
 from allmydata.util.assertutil import precondition
-from allmydata.util.hashutil import timing_safe_compare
-from allmydata.storage.lease import LeaseInfo
 from allmydata.storage.common import UnknownImmutableContainerVersionError
+from .immutable_schema import (
+    NEWEST_SCHEMA_VERSION,
+    schema_from_version,
+)
+
+
 # each share file (in storage/shares/$SI/$SHNUM) contains lease information
 # and share data. The share data is accessed by RIBucketWriter.write and
 # RIBucketReader.read . The lease information is not accessible through these
 # interfaces.
# The share file has the following layout: -# 0x00: share file version number, four bytes, current version is 1 +# 0x00: share file version number, four bytes, current version is 2 # 0x04: share data length, four bytes big-endian = A # See Footnote 1 below. # 0x08: number of leases, four bytes big-endian # 0x0c: beginning of share data (see immutable.layout.WriteBucketProxy) # A+0x0c = B: first lease. Lease format is: # B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner -# B+0x04: renew secret, 32 bytes (SHA256) -# B+0x24: cancel secret, 32 bytes (SHA256) +# B+0x04: renew secret, 32 bytes (SHA256 + blake2b) # See Footnote 2 below. +# B+0x24: cancel secret, 32 bytes (SHA256 + blake2b) # B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch # B+0x48: next lease, or end of record @@ -53,13 +58,126 @@ from allmydata.storage.common import UnknownImmutableContainerVersionError # then the value stored in this field will be the actual share data length # modulo 2**32. +# Footnote 2: The change between share file version number 1 and 2 is that +# storage of lease secrets is changed from plaintext to hashed. This change +# protects the secrets from compromises of local storage on the server: if a +# plaintext cancel secret is somehow exfiltrated from the storage server, an +# attacker could use it to cancel that lease and potentially cause user data +# to be discarded before intended by the real owner. As of this comment, +# lease cancellation is disabled because there have been at least two bugs +# which leak the persisted value of the cancellation secret. If lease secrets +# were stored hashed instead of plaintext then neither of these bugs would +# have allowed an attacker to learn a usable cancel secret. +# +# Clients are free to construct these secrets however they like. The +# Tahoe-LAFS client uses a SHA256-based construction. The server then uses +# blake2b to hash these values for storage so that it retains no persistent +# copy of the original secret. +# + +def _fix_lease_count_format(lease_count_format): + """ + Turn a single character struct format string into a format string suitable + for use in encoding and decoding the lease count value inside a share + file, if possible. + + :param str lease_count_format: A single character format string like + ``"B"`` or ``"L"``. + + :raise ValueError: If the given format string is not suitable for use + encoding and decoding a lease count. + + :return str: A complete format string which can safely be used to encode + and decode lease counts in a share file. + """ + if len(lease_count_format) != 1: + raise ValueError( + "Cannot construct ShareFile with lease_count_format={!r}; " + "format must accept a single value".format( + lease_count_format, + ), + ) + # Make it big-endian with standard size so all platforms agree on the + # result. + fixed = ">" + lease_count_format + if struct.calcsize(fixed) > 4: + # There is only room for at most 4 bytes in the share file format so + # we can't allow any larger formats. + raise ValueError( + "Cannot construct ShareFile with lease_count_format={!r}; " + "size must be smaller than size of '>L'".format( + lease_count_format, + ), + ) + return fixed + + class ShareFile(object): + """ + Support interaction with persistent storage of a share. + + :ivar str _lease_count_format: The format string which is used to encode + and decode the lease count inside the share file. As stated in the + comment in this module there is room for at most 4 bytes in this part + of the file. 
A format string that works on fewer bytes is allowed to + restrict the number of leases allowed in the share file to a smaller + number than could be supported by using the full 4 bytes. This is + mostly of interest for testing. + """ LEASE_SIZE = struct.calcsize(">L32s32sL") sharetype = "immutable" - def __init__(self, filename, max_size=None, create=False): - """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """ + @classmethod + def is_valid_header(cls, header): + # type: (bytes) -> bool + """ + Determine if the given bytes constitute a valid header for this type of + container. + + :param header: Some bytes from the beginning of a container. + + :return: ``True`` if the bytes could belong to this container, + ``False`` otherwise. + """ + (version,) = struct.unpack(">L", header[:4]) + return schema_from_version(version) is not None + + def __init__( + self, + filename, + max_size=None, + create=False, + lease_count_format="L", + schema=NEWEST_SCHEMA_VERSION, + ): + """ + Initialize a ``ShareFile``. + + :param Optional[int] max_size: If given, the maximum number of bytes + that this ``ShareFile`` will accept to be stored. + + :param bool create: If ``True``, create the file (and fail if it + exists already). ``max_size`` must not be ``None`` in this case. + If ``False``, open an existing file for reading. + + :param str lease_count_format: A format character to use to encode and + decode the number of leases in the share file. There are only 4 + bytes available in the file so the format must be 4 bytes or + smaller. If different formats are used at different times with + the same share file, the result will likely be nonsense. + + This parameter is intended for the test suite to use to be able to + exercise values near the maximum encodeable value without having + to create billions of leases. + + :raise ValueError: If the encoding of ``lease_count_format`` is too + large or if it is not a single format character. + """ + precondition((max_size is not None) or (not create), max_size, create) + + self._lease_count_format = _fix_lease_count_format(lease_count_format) + self._lease_count_size = struct.calcsize(self._lease_count_format) self.home = filename self._max_size = max_size if create: @@ -67,31 +185,30 @@ class ShareFile(object): # it. Also construct the metadata. assert not os.path.exists(self.home) fileutil.make_dirs(os.path.dirname(self.home)) - # The second field -- the four-byte share data length -- is no - # longer used as of Tahoe v1.3.0, but we continue to write it in - # there in case someone downgrades a storage server from >= - # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one - # server to another, etc. We do saturation -- a share data length - # larger than 2**32-1 (what can fit into the field) is marked as - # the largest length that can fit into the field. That way, even - # if this does happen, the old < v1.3.0 server will still allow - # clients to read the first part of the share. 
+ self._schema = schema with open(self.home, 'wb') as f: - f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0)) + f.write(self._schema.header(max_size)) self._lease_offset = max_size + 0x0c self._num_leases = 0 else: with open(self.home, 'rb') as f: filesize = os.path.getsize(self.home) (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) - if version != 1: - msg = "sharefile %s had version %d but we wanted 1" % \ - (filename, version) - raise UnknownImmutableContainerVersionError(msg) + self._schema = schema_from_version(version) + if self._schema is None: + raise UnknownImmutableContainerVersionError(filename, version) self._num_leases = num_leases self._lease_offset = filesize - (num_leases * self.LEASE_SIZE) + self._length = filesize - 0xc - (num_leases * self.LEASE_SIZE) + self._data_offset = 0xc + def get_length(self): + """ + Return the length of the data in the share, if we're reading. + """ + return self._length + def unlink(self): os.unlink(self.home) @@ -122,16 +239,25 @@ class ShareFile(object): offset = self._lease_offset + lease_number * self.LEASE_SIZE f.seek(offset) assert f.tell() == offset - f.write(lease_info.to_immutable_data()) + f.write(self._schema.lease_serializer.serialize(lease_info)) def _read_num_leases(self, f): f.seek(0x08) - (num_leases,) = struct.unpack(">L", f.read(4)) + (num_leases,) = struct.unpack( + self._lease_count_format, + f.read(self._lease_count_size), + ) return num_leases def _write_num_leases(self, f, num_leases): + self._write_encoded_num_leases( + f, + struct.pack(self._lease_count_format, num_leases), + ) + + def _write_encoded_num_leases(self, f, encoded_num_leases): f.seek(0x08) - f.write(struct.pack(">L", num_leases)) + f.write(encoded_num_leases) def _truncate_leases(self, f, num_leases): f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE) @@ -144,34 +270,63 @@ class ShareFile(object): for i in range(num_leases): data = f.read(self.LEASE_SIZE) if data: - yield LeaseInfo().from_immutable_data(data) + yield self._schema.lease_serializer.unserialize(data) def add_lease(self, lease_info): with open(self.home, 'rb+') as f: num_leases = self._read_num_leases(f) + # Before we write the new lease record, make sure we can encode + # the new lease count. + new_lease_count = struct.pack(self._lease_count_format, num_leases + 1) self._write_lease_record(f, num_leases, lease_info) - self._write_num_leases(f, num_leases+1) + self._write_encoded_num_leases(f, new_lease_count) - def renew_lease(self, renew_secret, new_expire_time): + def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False): + # type: (bytes, int, bool) -> None + """ + Update the expiration time on an existing lease. + + :param allow_backdate: If ``True`` then allow the new expiration time + to be before the current expiration time. Otherwise, make no + change when this is the case. + + :raise IndexError: If there is no lease matching the given renew + secret. + """ for i,lease in enumerate(self.get_leases()): - if timing_safe_compare(lease.renew_secret, renew_secret): + if lease.is_renew_secret(renew_secret): # yup. See if we need to update the owner time. 
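# A minimal sketch (not from the patch) of the guard add_lease applies above:
# the new lease count is packed *before* any record is written, so if the
# count no longer fits the configured format, struct.error aborts the
# operation and the share file is left unmodified.  The ">B" format here is a
# hypothetical test-only choice allowing at most 255 leases.
import struct

lease_count_format = ">B"
num_leases = 255
try:
    encoded = struct.pack(lease_count_format, num_leases + 1)
except struct.error:
    encoded = None   # cannot encode the new count; do not touch the file
assert encoded is None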
- if new_expire_time > lease.expiration_time: + if allow_backdate or new_expire_time > lease.get_expiration_time(): # yes - lease.expiration_time = new_expire_time + lease = lease.renew(new_expire_time) with open(self.home, 'rb+') as f: self._write_lease_record(f, i, lease) return raise IndexError("unable to renew non-existent lease") - def add_or_renew_lease(self, lease_info): + def add_or_renew_lease(self, available_space, lease_info): + """ + Renew an existing lease if possible, otherwise allocate a new one. + + :param int available_space: The maximum number of bytes of storage to + commit in this operation. If more than this number of bytes is + required, raise ``NoSpace`` instead. + + :param LeaseInfo lease_info: The details of the lease to renew or add. + + :raise NoSpace: If more than ``available_space`` bytes is required to + complete the operation. In this case, no lease is added. + + :return: ``None`` + """ try: self.renew_lease(lease_info.renew_secret, - lease_info.expiration_time) + lease_info.get_expiration_time()) except IndexError: + if lease_info.immutable_size() > available_space: + raise NoSpace() self.add_lease(lease_info) - def cancel_lease(self, cancel_secret): """Remove a lease with the given cancel_secret. If the last lease is cancelled, the file will be removed. Return the number of bytes that @@ -183,7 +338,7 @@ class ShareFile(object): leases = list(self.get_leases()) num_leases_removed = 0 for i,lease in enumerate(leases): - if timing_safe_compare(lease.cancel_secret, cancel_secret): + if lease.is_cancel_secret(cancel_secret): leases[i] = None num_leases_removed += 1 if not num_leases_removed: @@ -205,10 +360,12 @@ class ShareFile(object): return space_freed -@implementer(RIBucketWriter) -class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78 +class BucketWriter(object): + """ + Keep track of the process of writing to a ShareFile. + """ - def __init__(self, ss, incominghome, finalhome, max_size, lease_info): + def __init__(self, ss, incominghome, finalhome, max_size, lease_info, clock): self.ss = ss self.incominghome = incominghome self.finalhome = finalhome @@ -220,15 +377,34 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78 # added by simultaneous uploaders self._sharefile.add_lease(lease_info) self._already_written = RangeMap() + self._clock = clock + self._timeout = clock.callLater(30 * 60, self._abort_due_to_timeout) + + def required_ranges(self): # type: () -> RangeMap + """ + Return which ranges still need to be written. + """ + result = RangeMap() + result.set(True, 0, self._max_size) + for start, end, _ in self._already_written.ranges(): + result.delete(start, end) + return result def allocated_size(self): return self._max_size - def remote_write(self, offset, data): - start = time.time() + def write(self, offset, data): # type: (int, bytes) -> bool + """ + Write data at given offset, return whether the upload is complete. + """ + # Delay the timeout, since we received data; if we get an + # AlreadyCancelled error, that means there's a bug in the client and + # write() was called after close(). 
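# A minimal sketch (not from the patch) of the timeout behaviour described in
# the comment above, using twisted.internet.task.Clock as the injected clock:
# each write() pushes the 30-minute deadline back, and only a full 30 minutes
# of silence triggers the abort.
from twisted.internet.task import Clock

aborted = []
clock = Clock()
timeout = clock.callLater(30 * 60, aborted.append, "aborted")

clock.advance(29 * 60)      # data is still trickling in
timeout.reset(30 * 60)      # a write() arrived; restart the countdown
clock.advance(29 * 60)
assert aborted == []        # still inside the (reset) window

clock.advance(2 * 60)       # more than 30 minutes with no writes
assert aborted == ["aborted"]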
+ self._timeout.reset(30 * 60) + start = self._clock.seconds() precondition(not self.closed) if self.throw_out_all_data: - return + return False # Make sure we're not conflicting with existing data: end = offset + len(data) @@ -243,12 +419,23 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78 self._sharefile.write_share_data(offset, data) self._already_written.set(True, offset, end) - self.ss.add_latency("write", time.time() - start) + self.ss.add_latency("write", self._clock.seconds() - start) self.ss.count("write") + return self._is_finished() - def remote_close(self): + def _is_finished(self): + """ + Return whether the whole thing has been written. + """ + return sum([mr.stop - mr.start for mr in self._already_written.ranges()]) == self._max_size + + def close(self): + # This can't actually be enabled, because it's not backwards compatible + # with old Foolscap clients. + # assert self._is_finished() precondition(not self.closed) - start = time.time() + self._timeout.cancel() + start = self._clock.seconds() fileutil.make_dirs(os.path.dirname(self.finalhome)) fileutil.rename(self.incominghome, self.finalhome) @@ -281,20 +468,25 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78 filelen = os.stat(self.finalhome)[stat.ST_SIZE] self.ss.bucket_writer_closed(self, filelen) - self.ss.add_latency("close", time.time() - start) + self.ss.add_latency("close", self._clock.seconds() - start) self.ss.count("close") def disconnected(self): if not self.closed: - self._abort() + self.abort() - def remote_abort(self): + def _abort_due_to_timeout(self): + """ + Called if we run out of time. + """ + log.msg("storage: aborting sharefile %s due to timeout" % self.incominghome, + facility="tahoe.storage", level=log.UNUSUAL) + self.abort() + + def abort(self): log.msg("storage: aborting sharefile %s" % self.incominghome, facility="tahoe.storage", level=log.UNUSUAL) - self._abort() self.ss.count("abort") - - def _abort(self): if self.closed: return @@ -312,9 +504,33 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78 self.closed = True self.ss.bucket_writer_closed(self, 0) + # Cancel timeout if it wasn't already cancelled. + if self._timeout.active(): + self._timeout.cancel() -@implementer(RIBucketReader) -class BucketReader(Referenceable): # type: ignore # warner/foolscap#78 + +@implementer(RIBucketWriter) +class FoolscapBucketWriter(Referenceable): # type: ignore # warner/foolscap#78 + """ + Foolscap-specific BucketWriter. + """ + def __init__(self, bucket_writer): + self._bucket_writer = bucket_writer + + def remote_write(self, offset, data): + self._bucket_writer.write(offset, data) + + def remote_close(self): + return self._bucket_writer.close() + + def remote_abort(self): + return self._bucket_writer.abort() + + +class BucketReader(object): + """ + Manage the process for reading from a ``ShareFile``. + """ def __init__(self, ss, sharefname, storage_index=None, shnum=None): self.ss = ss @@ -329,15 +545,37 @@ class BucketReader(Referenceable): # type: ignore # warner/foolscap#78 ), self.shnum) - def remote_read(self, offset, length): + def read(self, offset, length): start = time.time() data = self._share_file.read_share_data(offset, length) self.ss.add_latency("read", time.time() - start) self.ss.count("read") return data + def advise_corrupt_share(self, reason): + return self.ss.advise_corrupt_share(b"immutable", + self.storage_index, + self.shnum, + reason) + + def get_length(self): + """ + Return the length of the data in the share. 
+ """ + return self._share_file.get_length() + + +@implementer(RIBucketReader) +class FoolscapBucketReader(Referenceable): # type: ignore # warner/foolscap#78 + """ + Foolscap wrapper for ``BucketReader`` + """ + + def __init__(self, bucket_reader): + self._bucket_reader = bucket_reader + + def remote_read(self, offset, length): + return self._bucket_reader.read(offset, length) + def remote_advise_corrupt_share(self, reason): - return self.ss.remote_advise_corrupt_share(b"immutable", - self.storage_index, - self.shnum, - reason) + return self._bucket_reader.advise_corrupt_share(reason) diff --git a/src/allmydata/storage/immutable_schema.py b/src/allmydata/storage/immutable_schema.py new file mode 100644 index 000000000..40663b935 --- /dev/null +++ b/src/allmydata/storage/immutable_schema.py @@ -0,0 +1,72 @@ +""" +Ported to Python 3. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +import struct + +import attr + +from .lease_schema import ( + v1_immutable, + v2_immutable, +) + +@attr.s(frozen=True) +class _Schema(object): + """ + Implement encoding and decoding for multiple versions of the immutable + container schema. + + :ivar int version: the version number of the schema this object supports + + :ivar lease_serializer: an object that is responsible for lease + serialization and unserialization + """ + version = attr.ib() + lease_serializer = attr.ib() + + def header(self, max_size): + # type: (int) -> bytes + """ + Construct a container header. + + :param max_size: the maximum size the container can hold + + :return: the header bytes + """ + # The second field -- the four-byte share data length -- is no longer + # used as of Tahoe v1.3.0, but we continue to write it in there in + # case someone downgrades a storage server from >= Tahoe-1.3.0 to < + # Tahoe-1.3.0, or moves a share file from one server to another, + # etc. We do saturation -- a share data length larger than 2**32-1 + # (what can fit into the field) is marked as the largest length that + # can fit into the field. That way, even if this does happen, the old + # < v1.3.0 server will still allow clients to read the first part of + # the share. + return struct.pack(">LLL", self.version, min(2**32 - 1, max_size), 0) + +ALL_SCHEMAS = { + _Schema(version=2, lease_serializer=v2_immutable), + _Schema(version=1, lease_serializer=v1_immutable), +} +ALL_SCHEMA_VERSIONS = {schema.version for schema in ALL_SCHEMAS} +NEWEST_SCHEMA_VERSION = max(ALL_SCHEMAS, key=lambda schema: schema.version) + +def schema_from_version(version): + # (int) -> Optional[type] + """ + Find the schema object that corresponds to a certain version number. 
+ """ + for schema in ALL_SCHEMAS: + if schema.version == version: + return schema + return None diff --git a/src/allmydata/storage/lease.py b/src/allmydata/storage/lease.py index 187f32406..c056a7d28 100644 --- a/src/allmydata/storage/lease.py +++ b/src/allmydata/storage/lease.py @@ -13,52 +13,375 @@ if PY2: import struct, time +import attr + +from zope.interface import ( + Interface, + implementer, +) + +from twisted.python.components import ( + proxyForInterface, +) + +from allmydata.util.hashutil import timing_safe_compare +from allmydata.util import base32 + +# struct format for representation of a lease in an immutable share +IMMUTABLE_FORMAT = ">L32s32sL" + +# struct format for representation of a lease in a mutable share +MUTABLE_FORMAT = ">LL32s32s20s" + + +class ILeaseInfo(Interface): + """ + Represent a marker attached to a share that indicates that share should be + retained for some amount of time. + + Typically clients will create and renew leases on their shares as a way to + inform storage servers that there is still interest in those shares. A + share may have more than one lease. If all leases on a share have + expiration times in the past then the storage server may take this as a + strong hint that no one is interested in the share anymore and therefore + the share may be deleted to reclaim the space. + """ + def renew(new_expire_time): + """ + Create a new ``ILeaseInfo`` with the given expiration time. + + :param Union[int, float] new_expire_time: The expiration time the new + ``ILeaseInfo`` will have. + + :return: The new ``ILeaseInfo`` provider with the new expiration time. + """ + + def get_expiration_time(): + """ + :return Union[int, float]: this lease's expiration time + """ + + def get_grant_renew_time_time(): + """ + :return Union[int, float]: a guess about the last time this lease was + renewed + """ + + def get_age(): + """ + :return Union[int, float]: a guess about how long it has been since this + lease was renewed + """ + + def to_immutable_data(): + """ + :return bytes: a serialized representation of this lease suitable for + inclusion in an immutable container + """ + + def to_mutable_data(): + """ + :return bytes: a serialized representation of this lease suitable for + inclusion in a mutable container + """ + + def immutable_size(): + """ + :return int: the size of the serialized representation of this lease in an + immutable container + """ + + def mutable_size(): + """ + :return int: the size of the serialized representation of this lease in a + mutable container + """ + + def is_renew_secret(candidate_secret): + """ + :return bool: ``True`` if the given byte string is this lease's renew + secret, ``False`` otherwise + """ + + def present_renew_secret(): + """ + :return str: Text which could reasonably be shown to a person representing + this lease's renew secret. + """ + + def is_cancel_secret(candidate_secret): + """ + :return bool: ``True`` if the given byte string is this lease's cancel + secret, ``False`` otherwise + """ + + def present_cancel_secret(): + """ + :return str: Text which could reasonably be shown to a person representing + this lease's cancel secret. 
+ """ + + +@implementer(ILeaseInfo) +@attr.s(frozen=True) class LeaseInfo(object): - def __init__(self, owner_num=None, renew_secret=None, cancel_secret=None, - expiration_time=None, nodeid=None): - self.owner_num = owner_num - self.renew_secret = renew_secret - self.cancel_secret = cancel_secret - self.expiration_time = expiration_time - if nodeid is not None: - assert isinstance(nodeid, bytes) - assert len(nodeid) == 20 - self.nodeid = nodeid + """ + Represent the details of one lease, a marker which is intended to inform + the storage server how long to store a particular share. + """ + owner_num = attr.ib(default=None) + + # Don't put secrets into the default string representation. This makes it + # slightly less likely the secrets will accidentally be leaked to + # someplace they're not meant to be. + renew_secret = attr.ib(default=None, repr=False) + cancel_secret = attr.ib(default=None, repr=False) + + _expiration_time = attr.ib(default=None) + + nodeid = attr.ib(default=None) + + @nodeid.validator + def _validate_nodeid(self, attribute, value): + if value is not None: + if not isinstance(value, bytes): + raise ValueError( + "nodeid value must be bytes, not {!r}".format(value), + ) + if len(value) != 20: + raise ValueError( + "nodeid value must be 20 bytes long, not {!r}".format(value), + ) + return None def get_expiration_time(self): - return self.expiration_time + # type: () -> float + """ + Retrieve a POSIX timestamp representing the time at which this lease is + set to expire. + """ + return self._expiration_time + + def renew(self, new_expire_time): + # type: (float) -> LeaseInfo + """ + Create a new lease the same as this one but with a new expiration time. + + :param new_expire_time: The new expiration time. + + :return: The new lease info. + """ + return attr.assoc( + self, + _expiration_time=new_expire_time, + ) + + def is_renew_secret(self, candidate_secret): + # type: (bytes) -> bool + """ + Check a string to see if it is the correct renew secret. + + :return: ``True`` if it is the correct renew secret, ``False`` + otherwise. + """ + return timing_safe_compare(self.renew_secret, candidate_secret) + + def present_renew_secret(self): + # type: () -> str + """ + Return the renew secret, base32-encoded. + """ + return str(base32.b2a(self.renew_secret), "utf-8") + + def is_cancel_secret(self, candidate_secret): + # type: (bytes) -> bool + """ + Check a string to see if it is the correct cancel secret. + + :return: ``True`` if it is the correct cancel secret, ``False`` + otherwise. + """ + return timing_safe_compare(self.cancel_secret, candidate_secret) + + def present_cancel_secret(self): + # type: () -> str + """ + Return the cancel secret, base32-encoded. + """ + return str(base32.b2a(self.cancel_secret), "utf-8") def get_grant_renew_time_time(self): # hack, based upon fixed 31day expiration period - return self.expiration_time - 31*24*60*60 + return self._expiration_time - 31*24*60*60 def get_age(self): return time.time() - self.get_grant_renew_time_time() - def from_immutable_data(self, data): - (self.owner_num, - self.renew_secret, - self.cancel_secret, - self.expiration_time) = struct.unpack(">L32s32sL", data) - self.nodeid = None - return self + @classmethod + def from_immutable_data(cls, data): + """ + Create a new instance from the encoded data given. + + :param data: A lease serialized using the immutable-share-file format. 
+ """ + names = [ + "owner_num", + "renew_secret", + "cancel_secret", + "expiration_time", + ] + values = struct.unpack(IMMUTABLE_FORMAT, data) + return cls(nodeid=None, **dict(zip(names, values))) + + def immutable_size(self): + """ + :return int: The size, in bytes, of the representation of this lease in an + immutable share file. + """ + return struct.calcsize(IMMUTABLE_FORMAT) + + def mutable_size(self): + """ + :return int: The size, in bytes, of the representation of this lease in a + mutable share file. + """ + return struct.calcsize(MUTABLE_FORMAT) def to_immutable_data(self): - return struct.pack(">L32s32sL", + return struct.pack(IMMUTABLE_FORMAT, self.owner_num, self.renew_secret, self.cancel_secret, - int(self.expiration_time)) + int(self._expiration_time)) def to_mutable_data(self): - return struct.pack(">LL32s32s20s", + return struct.pack(MUTABLE_FORMAT, self.owner_num, - int(self.expiration_time), + int(self._expiration_time), self.renew_secret, self.cancel_secret, self.nodeid) - def from_mutable_data(self, data): - (self.owner_num, - self.expiration_time, - self.renew_secret, self.cancel_secret, - self.nodeid) = struct.unpack(">LL32s32s20s", data) - return self + @classmethod + def from_mutable_data(cls, data): + """ + Create a new instance from the encoded data given. + + :param data: A lease serialized using the mutable-share-file format. + """ + names = [ + "owner_num", + "expiration_time", + "renew_secret", + "cancel_secret", + "nodeid", + ] + values = struct.unpack(MUTABLE_FORMAT, data) + return cls(**dict(zip(names, values))) + + +@attr.s(frozen=True) +class HashedLeaseInfo(proxyForInterface(ILeaseInfo, "_lease_info")): # type: ignore # unsupported dynamic base class + """ + A ``HashedLeaseInfo`` wraps lease information in which the secrets have + been hashed. + """ + _lease_info = attr.ib() + _hash = attr.ib() + + # proxyForInterface will take care of forwarding all methods on ILeaseInfo + # to `_lease_info`. Here we override a few of those methods to adjust + # their behavior to make them suitable for use with hashed secrets. + + def renew(self, new_expire_time): + # Preserve the HashedLeaseInfo wrapper around the renewed LeaseInfo. + return attr.assoc( + self, + _lease_info=super(HashedLeaseInfo, self).renew(new_expire_time), + ) + + def is_renew_secret(self, candidate_secret): + # type: (bytes) -> bool + """ + Hash the candidate secret and compare the result to the stored hashed + secret. + """ + return super(HashedLeaseInfo, self).is_renew_secret(self._hash(candidate_secret)) + + def present_renew_secret(self): + # type: () -> str + """ + Present the hash of the secret with a marker indicating it is a hash. + """ + return u"hash:" + super(HashedLeaseInfo, self).present_renew_secret() + + def is_cancel_secret(self, candidate_secret): + # type: (bytes) -> bool + """ + Hash the candidate secret and compare the result to the stored hashed + secret. + """ + if isinstance(candidate_secret, _HashedCancelSecret): + # Someone read it off of this object in this project - probably + # the lease crawler - and is just trying to use it to identify + # which lease it wants to operate on. Avoid re-hashing the value. + # + # It is important that this codepath is only availably internally + # for this process to talk to itself. If it were to be exposed to + # clients over the network, they could just provide the hashed + # value to avoid having to ever learn the original value. + hashed_candidate = candidate_secret.hashed_value + else: + # It is not yet hashed so hash it. 
+ hashed_candidate = self._hash(candidate_secret) + + return super(HashedLeaseInfo, self).is_cancel_secret(hashed_candidate) + + def present_cancel_secret(self): + # type: () -> str + """ + Present the hash of the secret with a marker indicating it is a hash. + """ + return u"hash:" + super(HashedLeaseInfo, self).present_cancel_secret() + + @property + def owner_num(self): + return self._lease_info.owner_num + + @property + def nodeid(self): + return self._lease_info.nodeid + + @property + def cancel_secret(self): + """ + Give back an opaque wrapper around the hashed cancel secret which can + later be presented for a succesful equality comparison. + """ + # We don't *have* the cancel secret. We hashed it and threw away the + # original. That's good. It does mean that some code that runs + # in-process with the storage service (LeaseCheckingCrawler) runs into + # some difficulty. That code wants to cancel leases and does so using + # the same interface that faces storage clients (or would face them, + # if lease cancellation were exposed). + # + # Since it can't use the hashed secret to cancel a lease (that's the + # point of the hashing) and we don't have the unhashed secret to give + # it, instead we give it a marker that `cancel_lease` will recognize. + # On recognizing it, if the hashed value given matches the hashed + # value stored it is considered a match and the lease can be + # cancelled. + # + # This isn't great. Maybe the internal and external consumers of + # cancellation should use different interfaces. + return _HashedCancelSecret(self._lease_info.cancel_secret) + + +@attr.s(frozen=True) +class _HashedCancelSecret(object): + """ + ``_HashedCancelSecret`` is a marker type for an already-hashed lease + cancel secret that lets internal lease cancellers bypass the hash-based + protection that's imposed on external lease cancellers. + + :ivar bytes hashed_value: The already-hashed secret. + """ + hashed_value = attr.ib() diff --git a/src/allmydata/storage/lease_schema.py b/src/allmydata/storage/lease_schema.py new file mode 100644 index 000000000..7e604388e --- /dev/null +++ b/src/allmydata/storage/lease_schema.py @@ -0,0 +1,138 @@ +""" +Ported to Python 3. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +try: + from typing import Union +except ImportError: + pass + +import attr + +from nacl.hash import blake2b +from nacl.encoding import RawEncoder + +from .lease import ( + LeaseInfo, + HashedLeaseInfo, +) + +@attr.s(frozen=True) +class CleartextLeaseSerializer(object): + """ + Serialize and unserialize leases with cleartext secrets. + """ + _to_data = attr.ib() + _from_data = attr.ib() + + def serialize(self, lease): + # type: (LeaseInfo) -> bytes + """ + Represent the given lease as bytes with cleartext secrets. + """ + if isinstance(lease, LeaseInfo): + return self._to_data(lease) + raise ValueError( + "ShareFile v1 schema only supports LeaseInfo, not {!r}".format( + lease, + ), + ) + + def unserialize(self, data): + # type: (bytes) -> LeaseInfo + """ + Load a lease with cleartext secrets from the given bytes representation. + """ + # In v1 of the immutable schema lease secrets are stored plaintext. 
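# A minimal sketch (not from the patch) of the v1 ("cleartext") lease wire
# format handled by this serializer: owner number, 32-byte renew secret,
# 32-byte cancel secret, and expiration time, packed with IMMUTABLE_FORMAT
# (the same layout used for ShareFile.LEASE_SIZE).
import struct

IMMUTABLE_FORMAT = ">L32s32sL"
packed = struct.pack(IMMUTABLE_FORMAT, 0, b"R" * 32, b"C" * 32, 1700000000)
owner_num, renew_secret, cancel_secret, expiration = struct.unpack(IMMUTABLE_FORMAT, packed)

assert struct.calcsize(IMMUTABLE_FORMAT) == 72   # 4 + 32 + 32 + 4 bytes
assert (owner_num, expiration) == (0, 1700000000)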
+ # So load the data into a plain LeaseInfo which works on plaintext + # secrets. + return self._from_data(data) + +@attr.s(frozen=True) +class HashedLeaseSerializer(object): + _to_data = attr.ib() + _from_data = attr.ib() + + @classmethod + def _hash_secret(cls, secret): + # type: (bytes) -> bytes + """ + Hash a lease secret for storage. + """ + return blake2b(secret, digest_size=32, encoder=RawEncoder()) + + @classmethod + def _hash_lease_info(cls, lease_info): + # type: (LeaseInfo) -> HashedLeaseInfo + """ + Hash the cleartext lease info secrets into a ``HashedLeaseInfo``. + """ + if not isinstance(lease_info, LeaseInfo): + # Provide a little safety against misuse, especially an attempt to + # re-hash an already-hashed lease info which is represented as a + # different type. + raise TypeError( + "Can only hash LeaseInfo, not {!r}".format(lease_info), + ) + + # Hash the cleartext secrets in the lease info and wrap the result in + # a new type. + return HashedLeaseInfo( + attr.assoc( + lease_info, + renew_secret=cls._hash_secret(lease_info.renew_secret), + cancel_secret=cls._hash_secret(lease_info.cancel_secret), + ), + cls._hash_secret, + ) + + def serialize(self, lease): + # type: (Union[LeaseInfo, HashedLeaseInfo]) -> bytes + if isinstance(lease, LeaseInfo): + # v2 of the immutable schema stores lease secrets hashed. If + # we're given a LeaseInfo then it holds plaintext secrets. Hash + # them before trying to serialize. + lease = self._hash_lease_info(lease) + if isinstance(lease, HashedLeaseInfo): + return self._to_data(lease) + raise ValueError( + "ShareFile v2 schema cannot represent lease {!r}".format( + lease, + ), + ) + + def unserialize(self, data): + # type: (bytes) -> HashedLeaseInfo + # In v2 of the immutable schema lease secrets are stored hashed. Wrap + # a LeaseInfo in a HashedLeaseInfo so it can supply the correct + # interpretation for those values. + return HashedLeaseInfo(self._from_data(data), self._hash_secret) + +v1_immutable = CleartextLeaseSerializer( + LeaseInfo.to_immutable_data, + LeaseInfo.from_immutable_data, +) + +v2_immutable = HashedLeaseSerializer( + HashedLeaseInfo.to_immutable_data, + LeaseInfo.from_immutable_data, +) + +v1_mutable = CleartextLeaseSerializer( + LeaseInfo.to_mutable_data, + LeaseInfo.from_mutable_data, +) + +v2_mutable = HashedLeaseSerializer( + HashedLeaseInfo.to_mutable_data, + LeaseInfo.from_mutable_data, +) diff --git a/src/allmydata/storage/mutable.py b/src/allmydata/storage/mutable.py index 2ef0c3215..51c3a3c8b 100644 --- a/src/allmydata/storage/mutable.py +++ b/src/allmydata/storage/mutable.py @@ -13,7 +13,10 @@ if PY2: import os, stat, struct -from allmydata.interfaces import BadWriteEnablerError +from allmydata.interfaces import ( + BadWriteEnablerError, + NoSpace, +) from allmydata.util import idlib, log from allmydata.util.assertutil import precondition from allmydata.util.hashutil import timing_safe_compare @@ -21,7 +24,10 @@ from allmydata.storage.lease import LeaseInfo from allmydata.storage.common import UnknownMutableContainerVersionError, \ DataTooLargeError from allmydata.mutable.layout import MAX_MUTABLE_SHARE_SIZE - +from .mutable_schema import ( + NEWEST_SCHEMA_VERSION, + schema_from_header, +) # the MutableShareFile is like the ShareFile, but used for mutable data. It # has a different layout. See docs/mutable.txt for more details. 
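# A minimal sketch (not from the patch) of the v2 hashing scheme implemented
# by HashedLeaseSerializer in lease_schema.py above: the server persists only
# a 32-byte BLAKE2b digest of each lease secret, so a candidate secret
# presented later must be hashed the same way before comparison (the real
# code compares with timing_safe_compare rather than ==).
from nacl.hash import blake2b
from nacl.encoding import RawEncoder

def hash_secret(secret):
    # type: (bytes) -> bytes
    return blake2b(secret, digest_size=32, encoder=RawEncoder())

stored = hash_secret(b"R" * 32)           # what ends up in the share file
candidate = b"R" * 32                     # what a client presents to renew
assert hash_secret(candidate) == stored   # plaintext secrets are never compared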
@@ -61,26 +67,34 @@ class MutableShareFile(object): # our sharefiles share with a recognizable string, plus some random # binary data to reduce the chance that a regular text file will look # like a sharefile. - MAGIC = b"Tahoe mutable container v1\n" + b"\x75\x09\x44\x03\x8e" - assert len(MAGIC) == 32 - assert isinstance(MAGIC, bytes) MAX_SIZE = MAX_MUTABLE_SHARE_SIZE # TODO: decide upon a policy for max share size - def __init__(self, filename, parent=None): + @classmethod + def is_valid_header(cls, header): + # type: (bytes) -> bool + """ + Determine if the given bytes constitute a valid header for this type of + container. + + :param header: Some bytes from the beginning of a container. + + :return: ``True`` if the bytes could belong to this container, + ``False`` otherwise. + """ + return schema_from_header(header) is not None + + def __init__(self, filename, parent=None, schema=NEWEST_SCHEMA_VERSION): self.home = filename if os.path.exists(self.home): # we don't cache anything, just check the magic with open(self.home, 'rb') as f: - data = f.read(self.HEADER_SIZE) - (magic, - write_enabler_nodeid, write_enabler, - data_length, extra_least_offset) = \ - struct.unpack(">32s20s32sQQ", data) - if magic != self.MAGIC: - msg = "sharefile %s had magic '%r' but we wanted '%r'" % \ - (filename, magic, self.MAGIC) - raise UnknownMutableContainerVersionError(msg) + header = f.read(self.HEADER_SIZE) + self._schema = schema_from_header(header) + if self._schema is None: + raise UnknownMutableContainerVersionError(filename, header) + else: + self._schema = schema self.parent = parent # for logging def log(self, *args, **kwargs): @@ -88,23 +102,8 @@ class MutableShareFile(object): def create(self, my_nodeid, write_enabler): assert not os.path.exists(self.home) - data_length = 0 - extra_lease_offset = (self.HEADER_SIZE - + 4 * self.LEASE_SIZE - + data_length) - assert extra_lease_offset == self.DATA_OFFSET # true at creation - num_extra_leases = 0 with open(self.home, 'wb') as f: - header = struct.pack( - ">32s20s32sQQ", - self.MAGIC, my_nodeid, write_enabler, - data_length, extra_lease_offset, - ) - leases = (b"\x00" * self.LEASE_SIZE) * 4 - f.write(header + leases) - # data goes here, empty after creation - f.write(struct.pack(">L", num_extra_leases)) - # extra leases go here, none at creation + f.write(self._schema.header(my_nodeid, write_enabler)) def unlink(self): os.unlink(self.home) @@ -120,6 +119,7 @@ class MutableShareFile(object): def _read_share_data(self, f, offset, length): precondition(offset >= 0) + precondition(length >= 0) data_length = self._read_data_length(f) if offset+length > data_length: # reads beyond the end of the data are truncated. Reads that @@ -236,7 +236,7 @@ class MutableShareFile(object): + (lease_number-4)*self.LEASE_SIZE) f.seek(offset) assert f.tell() == offset - f.write(lease_info.to_mutable_data()) + f.write(self._schema.lease_serializer.serialize(lease_info)) def _read_lease_record(self, f, lease_number): # returns a LeaseInfo instance, or None @@ -253,7 +253,7 @@ class MutableShareFile(object): f.seek(offset) assert f.tell() == offset data = f.read(self.LEASE_SIZE) - lease_info = LeaseInfo().from_mutable_data(data) + lease_info = self._schema.lease_serializer.unserialize(data) if lease_info.owner_num == 0: return None return lease_info @@ -288,7 +288,19 @@ class MutableShareFile(object): except IndexError: return - def add_lease(self, lease_info): + def add_lease(self, available_space, lease_info): + """ + Add a new lease to this share. 
+ + :param int available_space: The maximum number of bytes of storage to + commit in this operation. If more than this number of bytes is + required, raise ``NoSpace`` instead. + + :raise NoSpace: If more than ``available_space`` bytes is required to + complete the operation. In this case, no lease is added. + + :return: ``None`` + """ precondition(lease_info.owner_num != 0) # 0 means "no lease here" with open(self.home, 'rb+') as f: num_lease_slots = self._get_num_lease_slots(f) @@ -296,17 +308,30 @@ class MutableShareFile(object): if empty_slot is not None: self._write_lease_record(f, empty_slot, lease_info) else: + if lease_info.mutable_size() > available_space: + raise NoSpace() self._write_lease_record(f, num_lease_slots, lease_info) - def renew_lease(self, renew_secret, new_expire_time): + def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False): + # type: (bytes, int, bool) -> None + """ + Update the expiration time on an existing lease. + + :param allow_backdate: If ``True`` then allow the new expiration time + to be before the current expiration time. Otherwise, make no + change when this is the case. + + :raise IndexError: If there is no lease matching the given renew + secret. + """ accepting_nodeids = set() with open(self.home, 'rb+') as f: for (leasenum,lease) in self._enumerate_leases(f): - if timing_safe_compare(lease.renew_secret, renew_secret): + if lease.is_renew_secret(renew_secret): # yup. See if we need to update the owner time. - if new_expire_time > lease.expiration_time: + if allow_backdate or new_expire_time > lease.get_expiration_time(): # yes - lease.expiration_time = new_expire_time + lease = lease.renew(new_expire_time) self._write_lease_record(f, leasenum, lease) return accepting_nodeids.add(lease.nodeid) @@ -320,13 +345,13 @@ class MutableShareFile(object): msg += " ." raise IndexError(msg) - def add_or_renew_lease(self, lease_info): + def add_or_renew_lease(self, available_space, lease_info): precondition(lease_info.owner_num != 0) # 0 means "no lease here" try: self.renew_lease(lease_info.renew_secret, - lease_info.expiration_time) + lease_info.get_expiration_time()) except IndexError: - self.add_lease(lease_info) + self.add_lease(available_space, lease_info) def cancel_lease(self, cancel_secret): """Remove any leases with the given cancel_secret. If the last lease @@ -346,7 +371,7 @@ class MutableShareFile(object): with open(self.home, 'rb+') as f: for (leasenum,lease) in self._enumerate_leases(f): accepting_nodeids.add(lease.nodeid) - if timing_safe_compare(lease.cancel_secret, cancel_secret): + if lease.is_cancel_secret(cancel_secret): self._write_lease_record(f, leasenum, blank_lease) modified += 1 else: @@ -377,7 +402,7 @@ class MutableShareFile(object): write_enabler_nodeid, write_enabler, data_length, extra_least_offset) = \ struct.unpack(">32s20s32sQQ", data) - assert magic == self.MAGIC + assert self.is_valid_header(data) return (write_enabler, write_enabler_nodeid) def readv(self, readv): @@ -387,11 +412,14 @@ class MutableShareFile(object): datav.append(self._read_share_data(f, offset, length)) return datav -# def remote_get_length(self): -# f = open(self.home, 'rb') -# data_length = self._read_data_length(f) -# f.close() -# return data_length + def get_length(self): + """ + Return the length of the data in the share. 
+ """ + f = open(self.home, 'rb') + data_length = self._read_data_length(f) + f.close() + return data_length def check_write_enabler(self, write_enabler, si_s): with open(self.home, 'rb+') as f: @@ -454,4 +482,3 @@ def create_mutable_sharefile(filename, my_nodeid, write_enabler, parent): ms.create(my_nodeid, write_enabler) del ms return MutableShareFile(filename, parent) - diff --git a/src/allmydata/storage/mutable_schema.py b/src/allmydata/storage/mutable_schema.py new file mode 100644 index 000000000..4be0d2137 --- /dev/null +++ b/src/allmydata/storage/mutable_schema.py @@ -0,0 +1,144 @@ +""" +Ported to Python 3. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +import struct + +import attr + +from ..util.hashutil import ( + tagged_hash, +) +from .lease import ( + LeaseInfo, +) +from .lease_schema import ( + v1_mutable, + v2_mutable, +) + +def _magic(version): + # type: (int) -> bytes + """ + Compute a "magic" header string for a container of the given version. + + :param version: The version number of the container. + """ + # Make it easy for people to recognize + human_readable = u"Tahoe mutable container v{:d}\n".format(version).encode("ascii") + # But also keep the chance of accidental collision low + if version == 1: + # It's unclear where this byte sequence came from. It may have just + # been random. In any case, preserve it since it is the magic marker + # in all v1 share files. + random_bytes = b"\x75\x09\x44\x03\x8e" + else: + # For future versions, use a reproducable scheme. + random_bytes = tagged_hash( + b"allmydata_mutable_container_header", + human_readable, + truncate_to=5, + ) + magic = human_readable + random_bytes + assert len(magic) == 32 + if version > 1: + # The chance of collision is pretty low but let's just be sure about + # it. + assert magic != _magic(version - 1) + + return magic + +def _header(magic, extra_lease_offset, nodeid, write_enabler): + # type: (bytes, int, bytes, bytes) -> bytes + """ + Construct a container header. + + :param nodeid: A unique identifier for the node holding this + container. + + :param write_enabler: A secret shared with the client used to + authorize changes to the contents of this container. + """ + fixed_header = struct.pack( + ">32s20s32sQQ", + magic, + nodeid, + write_enabler, + # data length, initially the container is empty + 0, + extra_lease_offset, + ) + blank_leases = b"\x00" * LeaseInfo().mutable_size() * 4 + extra_lease_count = struct.pack(">L", 0) + + return b"".join([ + fixed_header, + # share data will go in between the next two items eventually but + # for now there is none. + blank_leases, + extra_lease_count, + ]) + + +_HEADER_FORMAT = ">32s20s32sQQ" + +# This size excludes leases +_HEADER_SIZE = struct.calcsize(_HEADER_FORMAT) + +_EXTRA_LEASE_OFFSET = _HEADER_SIZE + 4 * LeaseInfo().mutable_size() + + +@attr.s(frozen=True) +class _Schema(object): + """ + Implement encoding and decoding for the mutable container. 
+ + :ivar int version: the version number of the schema this object supports + + :ivar lease_serializer: an object that is responsible for lease + serialization and unserialization + """ + version = attr.ib() + lease_serializer = attr.ib() + _magic = attr.ib() + + @classmethod + def for_version(cls, version, lease_serializer): + return cls(version, lease_serializer, magic=_magic(version)) + + def magic_matches(self, candidate_magic): + # type: (bytes) -> bool + """ + Return ``True`` if a candidate string matches the expected magic string + from a mutable container header, ``False`` otherwise. + """ + return candidate_magic[:len(self._magic)] == self._magic + + def header(self, nodeid, write_enabler): + return _header(self._magic, _EXTRA_LEASE_OFFSET, nodeid, write_enabler) + +ALL_SCHEMAS = { + _Schema.for_version(version=2, lease_serializer=v2_mutable), + _Schema.for_version(version=1, lease_serializer=v1_mutable), +} +ALL_SCHEMA_VERSIONS = {schema.version for schema in ALL_SCHEMAS} +NEWEST_SCHEMA_VERSION = max(ALL_SCHEMAS, key=lambda schema: schema.version) + +def schema_from_header(header): + # (int) -> Optional[type] + """ + Find the schema object that corresponds to a certain version number. + """ + for schema in ALL_SCHEMAS: + if schema.magic_matches(header): + return schema + return None diff --git a/src/allmydata/storage/server.py b/src/allmydata/storage/server.py index 041783a4e..2bf99d74c 100644 --- a/src/allmydata/storage/server.py +++ b/src/allmydata/storage/server.py @@ -1,25 +1,16 @@ """ Ported to Python 3. """ -from __future__ import division -from __future__ import absolute_import -from __future__ import print_function -from __future__ import unicode_literals +from __future__ import annotations +from future.utils import bytes_to_native_str +from typing import Dict, Tuple, Iterable -from future.utils import bytes_to_native_str, PY2 -if PY2: - # Omit open() to get native behavior where open("w") always accepts native - # strings. Omit bytes so we don't leak future's custom bytes. - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, dict, list, object, range, str, max, min # noqa: F401 -else: - from typing import Dict - -import os, re, struct, time -import six +import os, re from foolscap.api import Referenceable from foolscap.ipb import IRemoteReference from twisted.application import service +from twisted.internet import reactor from zope.interface import implementer from allmydata.interfaces import RIStorageServer, IStatsProducer @@ -32,7 +23,10 @@ from allmydata.storage.lease import LeaseInfo from allmydata.storage.mutable import MutableShareFile, EmptyShare, \ create_mutable_sharefile from allmydata.mutable.layout import MAX_MUTABLE_SHARE_SIZE -from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader +from allmydata.storage.immutable import ( + ShareFile, BucketWriter, BucketReader, FoolscapBucketWriter, + FoolscapBucketReader, +) from allmydata.storage.crawler import BucketCountingCrawler from allmydata.storage.expirer import LeaseCheckingCrawler @@ -55,9 +49,13 @@ NUM_RE=re.compile("^[0-9]+$") DEFAULT_RENEWAL_TIME = 31 * 24 * 60 * 60 -@implementer(RIStorageServer, IStatsProducer) -class StorageServer(service.MultiService, Referenceable): +@implementer(IStatsProducer) +class StorageServer(service.MultiService): + """ + Implement the business logic for the storage server. 
+ """ name = 'storage' + # only the tests change this to anything else LeaseCheckerClass = LeaseCheckingCrawler def __init__(self, storedir, nodeid, reserved_space=0, @@ -68,7 +66,7 @@ class StorageServer(service.MultiService, Referenceable): expiration_override_lease_duration=None, expiration_cutoff_date=None, expiration_sharetypes=("mutable", "immutable"), - get_current_time=time.time): + clock=reactor): service.MultiService.__init__(self) assert isinstance(nodeid, bytes) assert len(nodeid) == 20 @@ -78,9 +76,9 @@ class StorageServer(service.MultiService, Referenceable): sharedir = os.path.join(storedir, "shares") fileutil.make_dirs(sharedir) self.sharedir = sharedir - # we don't actually create the corruption-advisory dir until necessary self.corruption_advisory_dir = os.path.join(storedir, "corruption-advisories") + fileutil.make_dirs(self.corruption_advisory_dir) self.reserved_space = int(reserved_space) self.no_storage = discard_storage self.readonly_storage = readonly_storage @@ -119,18 +117,19 @@ class StorageServer(service.MultiService, Referenceable): expiration_cutoff_date, expiration_sharetypes) self.lease_checker.setServiceParent(self) - self._get_current_time = get_current_time - - # Currently being-written Bucketwriters. For Foolscap, lifetime is tied - # to connection: when disconnection happens, the BucketWriters are - # removed. For HTTP, this makes no sense, so there will be - # timeout-based cleanup; see - # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3807. + self._clock = clock # Map in-progress filesystem path -> BucketWriter: self._bucket_writers = {} # type: Dict[str,BucketWriter] - # Canaries and disconnect markers for BucketWriters created via Foolscap: - self._bucket_writer_disconnect_markers = {} # type: Dict[BucketWriter,(IRemoteReference, object)] + + # These callables will be called with BucketWriters that closed: + self._call_on_bucket_writer_close = [] + + def stopService(self): + # Cancel any in-progress uploads: + for bw in list(self._bucket_writers.values()): + bw.disconnected() + return service.MultiService.stopService(self) def __repr__(self): return "" % (idlib.shortnodeid_b2a(self.my_nodeid),) @@ -253,7 +252,7 @@ class StorageServer(service.MultiService, Referenceable): space += bw.allocated_size() return space - def remote_get_version(self): + def get_version(self): remaining_space = self.get_available_space() if remaining_space is None: # We're on a platform that has no API to get disk stats. @@ -274,19 +273,24 @@ class StorageServer(service.MultiService, Referenceable): } return version - def _allocate_buckets(self, storage_index, + def allocate_buckets(self, storage_index, renew_secret, cancel_secret, sharenums, allocated_size, - owner_num=0): + owner_num=0, renew_leases=True): """ Generic bucket allocation API. + + :param bool renew_leases: If and only if this is ``True`` then renew a + secret-matching lease on (or, if none match, add a new lease to) + existing shares in this bucket. Any *new* shares are given a new + lease regardless. """ # owner_num is not for clients to set, but rather it should be # curried into the PersonalStorageServer instance that is dedicated # to a particular owner. 
- start = self._get_current_time() + start = self._clock.seconds() self.count("allocate") - alreadygot = set() + alreadygot = {} bucketwriters = {} # k: shnum, v: BucketWriter si_dir = storage_index_to_dir(storage_index) si_s = si_b2a(storage_index) @@ -297,7 +301,7 @@ class StorageServer(service.MultiService, Referenceable): # goes into the share files themselves. It could also be put into a # separate database. Note that the lease should not be added until # the BucketWriter has been closed. - expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME + expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret, expire_time, self.my_nodeid) @@ -317,10 +321,10 @@ class StorageServer(service.MultiService, Referenceable): # they asked about: this will save them a lot of work. Add or update # leases for all of them: if they want us to hold shares for this # file, they'll want us to hold leases for this file. - for (shnum, fn) in self._get_bucket_shares(storage_index): - alreadygot.add(shnum) - sf = ShareFile(fn) - sf.add_or_renew_lease(lease_info) + for (shnum, fn) in self.get_shares(storage_index): + alreadygot[shnum] = ShareFile(fn) + if renew_leases: + self._add_or_renew_leases(alreadygot.values(), lease_info) for shnum in sharenums: incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum) @@ -337,8 +341,12 @@ class StorageServer(service.MultiService, Referenceable): elif (not limited) or (remaining_space >= max_space_per_bucket): # ok! we need to create the new share file. bw = BucketWriter(self, incominghome, finalhome, - max_space_per_bucket, lease_info) + max_space_per_bucket, lease_info, + clock=self._clock) if self.no_storage: + # Really this should be done by having a separate class for + # this situation; see + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3862 bw.throw_out_all_data = True bucketwriters[shnum] = bw self._bucket_writers[incominghome] = bw @@ -351,61 +359,47 @@ class StorageServer(service.MultiService, Referenceable): if bucketwriters: fileutil.make_dirs(os.path.join(self.sharedir, si_dir)) - self.add_latency("allocate", self._get_current_time() - start) - return alreadygot, bucketwriters - - def remote_allocate_buckets(self, storage_index, - renew_secret, cancel_secret, - sharenums, allocated_size, - canary, owner_num=0): - """Foolscap-specific ``allocate_buckets()`` API.""" - alreadygot, bucketwriters = self._allocate_buckets( - storage_index, renew_secret, cancel_secret, sharenums, allocated_size, - owner_num=owner_num, - ) - # Abort BucketWriters if disconnection happens. - for bw in bucketwriters.values(): - disconnect_marker = canary.notifyOnDisconnect(bw.disconnected) - self._bucket_writer_disconnect_markers[bw] = (canary, disconnect_marker) - return alreadygot, bucketwriters + self.add_latency("allocate", self._clock.seconds() - start) + return set(alreadygot), bucketwriters def _iter_share_files(self, storage_index): - for shnum, filename in self._get_bucket_shares(storage_index): + for shnum, filename in self.get_shares(storage_index): with open(filename, 'rb') as f: header = f.read(32) - if header[:32] == MutableShareFile.MAGIC: + if MutableShareFile.is_valid_header(header): sf = MutableShareFile(filename, self) # note: if the share has been migrated, the renew_lease() # call will throw an exception, with information to help the # client update the lease. 
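# A minimal sketch (not from the patch) of the dispatch _iter_share_files
# performs here: instead of comparing magic/version bytes inline, each
# container class now exposes an is_valid_header() classmethod and the server
# simply asks each one in turn.
from allmydata.storage.immutable import ShareFile
from allmydata.storage.mutable import MutableShareFile

def open_share(filename):
    with open(filename, "rb") as f:
        header = f.read(32)
    if MutableShareFile.is_valid_header(header):
        return MutableShareFile(filename)
    elif ShareFile.is_valid_header(header):
        return ShareFile(filename)
    return None   # not a share file; callers skip it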
- elif header[:4] == struct.pack(">L", 1): + elif ShareFile.is_valid_header(header): sf = ShareFile(filename) else: continue # non-sharefile yield sf - def remote_add_lease(self, storage_index, renew_secret, cancel_secret, - owner_num=1): - start = self._get_current_time() + def add_lease(self, storage_index, renew_secret, cancel_secret, owner_num=1): + start = self._clock.seconds() self.count("add-lease") - new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME + new_expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret, new_expire_time, self.my_nodeid) - for sf in self._iter_share_files(storage_index): - sf.add_or_renew_lease(lease_info) - self.add_latency("add-lease", self._get_current_time() - start) + self._add_or_renew_leases( + self._iter_share_files(storage_index), + lease_info, + ) + self.add_latency("add-lease", self._clock.seconds() - start) return None - def remote_renew_lease(self, storage_index, renew_secret): - start = self._get_current_time() + def renew_lease(self, storage_index, renew_secret): + start = self._clock.seconds() self.count("renew") - new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME + new_expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME found_buckets = False for sf in self._iter_share_files(storage_index): found_buckets = True sf.renew_lease(renew_secret, new_expire_time) - self.add_latency("renew", self._get_current_time() - start) + self.add_latency("renew", self._clock.seconds() - start) if not found_buckets: raise IndexError("no such lease to renew") @@ -413,14 +407,21 @@ class StorageServer(service.MultiService, Referenceable): if self.stats_provider: self.stats_provider.count('storage_server.bytes_added', consumed_size) del self._bucket_writers[bw.incominghome] - if bw in self._bucket_writer_disconnect_markers: - canary, disconnect_marker = self._bucket_writer_disconnect_markers.pop(bw) - canary.dontNotifyOnDisconnect(disconnect_marker) + for handler in self._call_on_bucket_writer_close: + handler(bw) - def _get_bucket_shares(self, storage_index): - """Return a list of (shnum, pathname) tuples for files that hold + def register_bucket_writer_close_handler(self, handler): + """ + The handler will be called with any ``BucketWriter`` that closes. + """ + self._call_on_bucket_writer_close.append(handler) + + def get_shares(self, storage_index) -> Iterable[tuple[int, str]]: + """ + Return an iterable of (shnum, pathname) tuples for files that hold shares for this storage_index. In each tuple, 'shnum' will always be - the integer form of the last component of 'pathname'.""" + the integer form of the last component of 'pathname'. + """ storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index)) try: for f in os.listdir(storagedir): @@ -431,16 +432,19 @@ class StorageServer(service.MultiService, Referenceable): # Commonly caused by there being no buckets at all. pass - def remote_get_buckets(self, storage_index): - start = self._get_current_time() + def get_buckets(self, storage_index): + """ + Get ``BucketReaders`` for an immutable. 
+ """ + start = self._clock.seconds() self.count("get") si_s = si_b2a(storage_index) log.msg("storage: get_buckets %r" % si_s) bucketreaders = {} # k: sharenum, v: BucketReader - for shnum, filename in self._get_bucket_shares(storage_index): + for shnum, filename in self.get_shares(storage_index): bucketreaders[shnum] = BucketReader(self, filename, storage_index, shnum) - self.add_latency("get", self._get_current_time() - start) + self.add_latency("get", self._clock.seconds() - start) return bucketreaders def get_leases(self, storage_index): @@ -454,7 +458,7 @@ class StorageServer(service.MultiService, Referenceable): # since all shares get the same lease data, we just grab the leases # from the first share try: - shnum, filename = next(self._get_bucket_shares(storage_index)) + shnum, filename = next(self.get_shares(storage_index)) sf = ShareFile(filename) return sf.get_leases() except StopIteration: @@ -468,7 +472,7 @@ class StorageServer(service.MultiService, Referenceable): :return: An iterable of the leases attached to this slot. """ - for _, share_filename in self._get_bucket_shares(storage_index): + for _, share_filename in self.get_shares(storage_index): share = MutableShareFile(share_filename) return share.get_leases() return [] @@ -579,10 +583,8 @@ class StorageServer(service.MultiService, Referenceable): else: if sharenum not in shares: # allocate a new share - allocated_size = 2000 # arbitrary, really share = self._allocate_slot_share(bucketdir, secrets, sharenum, - allocated_size, owner_num=0) shares[sharenum] = share shares[sharenum].writev(datav, new_length) @@ -601,7 +603,7 @@ class StorageServer(service.MultiService, Referenceable): :return LeaseInfo: Information for a new lease for a share. """ ownerid = 1 # TODO - expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME + expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(ownerid, renew_secret, cancel_secret, expire_time, self.my_nodeid) @@ -611,13 +613,13 @@ class StorageServer(service.MultiService, Referenceable): """ Put the given lease onto the given shares. - :param dict[int, MutableShareFile] shares: The shares to put the lease - onto. + :param Iterable[Union[MutableShareFile, ShareFile]] shares: The shares + to put the lease onto. :param LeaseInfo lease_info: The lease to put on the shares. """ - for share in six.viewvalues(shares): - share.add_or_renew_lease(lease_info) + for share in shares: + share.add_or_renew_lease(self.get_available_space(), lease_info) def slot_testv_and_readv_and_writev( # type: ignore # warner/foolscap#78 self, @@ -625,19 +627,21 @@ class StorageServer(service.MultiService, Referenceable): secrets, test_and_write_vectors, read_vector, - renew_leases, + renew_leases=True, ): """ Read data from shares and conditionally write some data to them. :param bool renew_leases: If and only if this is ``True`` and the test - vectors pass then shares in this slot will also have an updated - lease applied to them. + vectors pass then shares mentioned in ``test_and_write_vectors`` + that still exist after the changes are made will also have a + secret-matching lease renewed (or, if none match, a new lease + added). See ``allmydata.interfaces.RIStorageServer`` for details about other parameters and return value. 
""" - start = self._get_current_time() + start = self._clock.seconds() self.count("writev") si_s = si_b2a(storage_index) log.msg("storage: slot_writev %r" % si_s) @@ -675,26 +679,14 @@ class StorageServer(service.MultiService, Referenceable): ) if renew_leases: lease_info = self._make_lease_info(renew_secret, cancel_secret) - self._add_or_renew_leases(remaining_shares, lease_info) + self._add_or_renew_leases(remaining_shares.values(), lease_info) # all done - self.add_latency("writev", self._get_current_time() - start) + self.add_latency("writev", self._clock.seconds() - start) return (testv_is_good, read_data) - def remote_slot_testv_and_readv_and_writev(self, storage_index, - secrets, - test_and_write_vectors, - read_vector): - return self.slot_testv_and_readv_and_writev( - storage_index, - secrets, - test_and_write_vectors, - read_vector, - renew_leases=True, - ) - def _allocate_slot_share(self, bucketdir, secrets, sharenum, - allocated_size, owner_num=0): + owner_num=0): (write_enabler, renew_secret, cancel_secret) = secrets my_nodeid = self.my_nodeid fileutil.make_dirs(bucketdir) @@ -703,8 +695,23 @@ class StorageServer(service.MultiService, Referenceable): self) return share - def remote_slot_readv(self, storage_index, shares, readv): - start = self._get_current_time() + def enumerate_mutable_shares(self, storage_index: bytes) -> set[int]: + """Return all share numbers for the given mutable.""" + si_dir = storage_index_to_dir(storage_index) + # shares exist if there is a file for them + bucketdir = os.path.join(self.sharedir, si_dir) + if not os.path.isdir(bucketdir): + return set() + result = set() + for sharenum_s in os.listdir(bucketdir): + try: + result.add(int(sharenum_s)) + except ValueError: + continue + return result + + def slot_readv(self, storage_index, shares, readv): + start = self._clock.seconds() self.count("readv") si_s = si_b2a(storage_index) lp = log.msg("storage: slot_readv %r %r" % (si_s, shares), @@ -713,7 +720,7 @@ class StorageServer(service.MultiService, Referenceable): # shares exist if there is a file for them bucketdir = os.path.join(self.sharedir, si_dir) if not os.path.isdir(bucketdir): - self.add_latency("readv", self._get_current_time() - start) + self.add_latency("readv", self._clock.seconds() - start) return {} datavs = {} for sharenum_s in os.listdir(bucketdir): @@ -727,33 +734,212 @@ class StorageServer(service.MultiService, Referenceable): datavs[sharenum] = msf.readv(readv) log.msg("returning shares %s" % (list(datavs.keys()),), facility="tahoe.storage", level=log.NOISY, parent=lp) - self.add_latency("readv", self._get_current_time() - start) + self.add_latency("readv", self._clock.seconds() - start) return datavs - def remote_advise_corrupt_share(self, share_type, storage_index, shnum, - reason): - # This is a remote API, I believe, so this has to be bytes for legacy - # protocol backwards compatibility reasons. + def _share_exists(self, storage_index, shnum): + """ + Check local share storage to see if a matching share exists. + + :param bytes storage_index: The storage index to inspect. + :param int shnum: The share number to check for. + + :return bool: ``True`` if a share with the given number exists at the + given storage index, ``False`` otherwise. 
+ """ + for existing_sharenum, ignored in self.get_shares(storage_index): + if existing_sharenum == shnum: + return True + return False + + def advise_corrupt_share(self, share_type, storage_index, shnum, + reason): + # Previously this had to be bytes for legacy protocol backwards + # compatibility reasons. Now that Foolscap layer has been abstracted + # out, we can probably refactor this to be unicode... assert isinstance(share_type, bytes) assert isinstance(reason, bytes), "%r is not bytes" % (reason,) - fileutil.make_dirs(self.corruption_advisory_dir) - now = time_format.iso_utc(sep="T") + si_s = si_b2a(storage_index) - # windows can't handle colons in the filename - fn = os.path.join( - self.corruption_advisory_dir, - ("%s--%s-%d" % (now, str(si_s, "utf-8"), shnum)).replace(":","") - ) - with open(fn, "w") as f: - f.write("report: Share Corruption\n") - f.write("type: %s\n" % bytes_to_native_str(share_type)) - f.write("storage_index: %s\n" % bytes_to_native_str(si_s)) - f.write("share_number: %d\n" % shnum) - f.write("\n") - f.write(bytes_to_native_str(reason)) - f.write("\n") + + if not self._share_exists(storage_index, shnum): + log.msg( + format=( + "discarding client corruption claim for %(si)s/%(shnum)d " + "which I do not have" + ), + si=si_s, + shnum=shnum, + ) + return + log.msg(format=("client claims corruption in (%(share_type)s) " + "%(si)s-%(shnum)d: %(reason)s"), share_type=share_type, si=si_s, shnum=shnum, reason=reason, level=log.SCARY, umid="SGx2fA") + + report = render_corruption_report(share_type, si_s, shnum, reason) + if len(report) > self.get_available_space(): + return None + + now = time_format.iso_utc(sep="T") + report_path = get_corruption_report_path( + self.corruption_advisory_dir, + now, + si_s, + shnum, + ) + with open(report_path, "w", encoding="utf-8") as f: + f.write(report) + return None + + def get_immutable_share_length(self, storage_index: bytes, share_number: int) -> int: + """Returns the length (in bytes) of an immutable.""" + si_dir = storage_index_to_dir(storage_index) + path = os.path.join(self.sharedir, si_dir, str(share_number)) + return ShareFile(path).get_length() + + def get_mutable_share_length(self, storage_index: bytes, share_number: int) -> int: + """Returns the length (in bytes) of a mutable.""" + si_dir = storage_index_to_dir(storage_index) + path = os.path.join(self.sharedir, si_dir, str(share_number)) + if not os.path.exists(path): + raise KeyError("No such storage index or share number") + return MutableShareFile(path).get_length() + + +@implementer(RIStorageServer) +class FoolscapStorageServer(Referenceable): # type: ignore # warner/foolscap#78 + """ + A filesystem-based implementation of ``RIStorageServer``. + + For Foolscap, BucketWriter lifetime is tied to connection: when + disconnection happens, the BucketWriters are removed. 
+ """ + name = 'storage' + + def __init__(self, storage_server): # type: (StorageServer) -> None + self._server = storage_server + + # Canaries and disconnect markers for BucketWriters created via Foolscap: + self._bucket_writer_disconnect_markers = {} # type: Dict[BucketWriter,Tuple[IRemoteReference, object]] + + self._server.register_bucket_writer_close_handler(self._bucket_writer_closed) + + def _bucket_writer_closed(self, bw): + if bw in self._bucket_writer_disconnect_markers: + canary, disconnect_marker = self._bucket_writer_disconnect_markers.pop(bw) + canary.dontNotifyOnDisconnect(disconnect_marker) + + def remote_get_version(self): + return self._server.get_version() + + def remote_allocate_buckets(self, storage_index, + renew_secret, cancel_secret, + sharenums, allocated_size, + canary, owner_num=0): + """Foolscap-specific ``allocate_buckets()`` API.""" + alreadygot, bucketwriters = self._server.allocate_buckets( + storage_index, renew_secret, cancel_secret, sharenums, allocated_size, + owner_num=owner_num, renew_leases=True, + ) + + # Abort BucketWriters if disconnection happens. + for bw in bucketwriters.values(): + disconnect_marker = canary.notifyOnDisconnect(bw.disconnected) + self._bucket_writer_disconnect_markers[bw] = (canary, disconnect_marker) + + # Wrap BucketWriters with Foolscap adapter: + bucketwriters = { + k: FoolscapBucketWriter(bw) + for (k, bw) in bucketwriters.items() + } + + return alreadygot, bucketwriters + + def remote_add_lease(self, storage_index, renew_secret, cancel_secret, + owner_num=1): + return self._server.add_lease(storage_index, renew_secret, cancel_secret) + + def remote_renew_lease(self, storage_index, renew_secret): + return self._server.renew_lease(storage_index, renew_secret) + + def remote_get_buckets(self, storage_index): + return { + k: FoolscapBucketReader(bucket) + for (k, bucket) in self._server.get_buckets(storage_index).items() + } + + def remote_slot_testv_and_readv_and_writev(self, storage_index, + secrets, + test_and_write_vectors, + read_vector): + return self._server.slot_testv_and_readv_and_writev( + storage_index, + secrets, + test_and_write_vectors, + read_vector, + renew_leases=True, + ) + + def remote_slot_readv(self, storage_index, shares, readv): + return self._server.slot_readv(storage_index, shares, readv) + + def remote_advise_corrupt_share(self, share_type, storage_index, shnum, + reason): + return self._server.advise_corrupt_share(share_type, storage_index, shnum, + reason) + + +CORRUPTION_REPORT_FORMAT = """\ +report: Share Corruption +type: {type} +storage_index: {storage_index} +share_number: {share_number} + +{reason} + +""" + +def render_corruption_report(share_type, si_s, shnum, reason): + """ + Create a string that explains a corruption report using freeform text. + + :param bytes share_type: The type of the share which the report is about. + + :param bytes si_s: The encoded representation of the storage index which + the report is about. + + :param int shnum: The share number which the report is about. + + :param bytes reason: The reason given by the client for the corruption + report. + """ + return CORRUPTION_REPORT_FORMAT.format( + type=bytes_to_native_str(share_type), + storage_index=bytes_to_native_str(si_s), + share_number=shnum, + reason=bytes_to_native_str(reason), + ) + +def get_corruption_report_path(base_dir, now, si_s, shnum): + """ + Determine the path to which a certain corruption report should be written. + + :param str base_dir: The directory beneath which to construct the path. 
+ + :param str now: The time of the report. + + :param str si_s: The encoded representation of the storage index which the + report is about. + + :param int shnum: The share number which the report is about. + + :return str: A path to which the report can be written. + """ + # windows can't handle colons in the filename + return os.path.join( + base_dir, + ("%s--%s-%d" % (now, str(si_s, "utf-8"), shnum)).replace(":","") + ) diff --git a/src/allmydata/storage/shares.py b/src/allmydata/storage/shares.py index ec6c0a501..59e7b1539 100644 --- a/src/allmydata/storage/shares.py +++ b/src/allmydata/storage/shares.py @@ -17,8 +17,7 @@ from allmydata.storage.immutable import ShareFile def get_share_file(filename): with open(filename, "rb") as f: prefix = f.read(32) - if prefix == MutableShareFile.MAGIC: + if MutableShareFile.is_valid_header(prefix): return MutableShareFile(filename) # otherwise assume it's immutable return ShareFile(filename) - diff --git a/src/allmydata/storage_client.py b/src/allmydata/storage_client.py index ac6c107d5..8e9ad3656 100644 --- a/src/allmydata/storage_client.py +++ b/src/allmydata/storage_client.py @@ -5,10 +5,6 @@ the foolscap-based server implemented in src/allmydata/storage/*.py . Ported to Python 3. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals # roadmap: # @@ -34,23 +30,25 @@ from __future__ import unicode_literals # # 6: implement other sorts of IStorageClient classes: S3, etc -from future.utils import PY2 -if PY2: - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +from __future__ import annotations + from six import ensure_text - +from typing import Union import re, time, hashlib - -# On Python 2 this will be the backport. +from os import urandom from configparser import NoSectionError import attr +from hyperlink import DecodedURL from zope.interface import ( Attribute, Interface, implementer, ) -from twisted.internet import defer +from twisted.python.failure import Failure +from twisted.web import http +from twisted.internet.task import LoopingCall +from twisted.internet import defer, reactor from twisted.application import service from twisted.plugin import ( getPlugins, @@ -58,7 +56,7 @@ from twisted.plugin import ( from eliot import ( log_call, ) -from foolscap.api import eventually +from foolscap.api import eventually, RemoteException from foolscap.reconnector import ( ReconnectionInfo, ) @@ -75,6 +73,14 @@ from allmydata.util.observer import ObserverList from allmydata.util.rrefutil import add_version_to_remote_reference from allmydata.util.hashutil import permute_server_hash from allmydata.util.dictutil import BytesKeyDict, UnicodeKeyDict +from allmydata.util.deferredutil import async_to_deferred +from allmydata.storage.http_client import ( + StorageClient, StorageClientImmutables, StorageClientGeneral, + ClientException as HTTPClientException, StorageClientMutables, + ReadVector, TestWriteVectors, WriteVector, TestVector, ClientException +) + +ANONYMOUS_STORAGE_NURLS = "anonymous-storage-NURLs" # who is responsible for de-duplication? @@ -100,8 +106,8 @@ class StorageClientConfig(object): :ivar preferred_peers: An iterable of the server-ids (``bytes``) of the storage servers where share placement is preferred, in order of - decreasing preference. See the *[client]peers.preferred* - documentation for details. 
+ decreasing preference. See the *[client]peers.preferred* documentation + for details. :ivar dict[unicode, dict[unicode, unicode]] storage_plugins: A mapping from names of ``IFoolscapStoragePlugin`` configured in *tahoe.cfg* to the @@ -263,6 +269,10 @@ class StorageFarmBroker(service.MultiService): by the given announcement. """ assert isinstance(server_id, bytes) + if len(server["ann"].get(ANONYMOUS_STORAGE_NURLS, [])) > 0: + s = HTTPNativeStorageServer(server_id, server["ann"]) + s.on_status_changed(lambda _: self._got_connection()) + return s handler_overrides = server.get("connections", {}) s = NativeStorageServer( server_id, @@ -524,6 +534,45 @@ class IFoolscapStorageServer(Interface): """ +def _parse_announcement(server_id: bytes, furl: bytes, ann: dict) -> tuple[str, bytes, bytes, bytes, bytes]: + """ + Parse the furl and announcement, return: + + (nickname, permutation_seed, tubid, short_description, long_description) + """ + m = re.match(br'pb://(\w+)@', furl) + assert m, furl + tubid_s = m.group(1).lower() + tubid = base32.a2b(tubid_s) + if "permutation-seed-base32" in ann: + seed = ann["permutation-seed-base32"] + if isinstance(seed, str): + seed = seed.encode("utf-8") + ps = base32.a2b(seed) + elif re.search(br'^v0-[0-9a-zA-Z]{52}$', server_id): + ps = base32.a2b(server_id[3:]) + else: + log.msg("unable to parse serverid '%(server_id)s as pubkey, " + "hashing it to get permutation-seed, " + "may not converge with other clients", + server_id=server_id, + facility="tahoe.storage_broker", + level=log.UNUSUAL, umid="qu86tw") + ps = hashlib.sha256(server_id).digest() + permutation_seed = ps + + assert server_id + long_description = server_id + if server_id.startswith(b"v0-"): + # remove v0- prefix from abbreviated name + short_description = server_id[3:3+8] + else: + short_description = server_id[:8] + nickname = ann.get("nickname", "") + + return (nickname, permutation_seed, tubid, short_description, long_description) + + @implementer(IFoolscapStorageServer) @attr.s(frozen=True) class _FoolscapStorage(object): @@ -567,43 +616,13 @@ class _FoolscapStorage(object): The furl will be a Unicode string on Python 3; on Python 2 it will be either a native (bytes) string or a Unicode string. 
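The extracted ``_parse_announcement`` helper can be exercised on its own; the announcement, furl, and server id below are fabricated values chosen only to satisfy the formats it expects:

    ann = {
        "nickname": "storage-1",
        "permutation-seed-base32": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    }
    furl = b"pb://abcdefghijklmnopqrstuvwxyz234567@tcp:example.invalid:1234/swiss"
    server_id = b"v0-" + b"a" * 52

    (nickname, permutation_seed, tubid,
     short_description, long_description) = _parse_announcement(server_id, furl, ann)
    # nickname          -> "storage-1"
    # permutation_seed  -> the announced seed, base32-decoded
    # tubid             -> the base32-decoded tub id parsed out of the furl
    # short_description -> server_id[3:11] (the "v0-" prefix is dropped)
    # long_description  -> the full server_id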
""" - furl = furl.encode("utf-8") - m = re.match(br'pb://(\w+)@', furl) - assert m, furl - tubid_s = m.group(1).lower() - tubid = base32.a2b(tubid_s) - if "permutation-seed-base32" in ann: - seed = ann["permutation-seed-base32"] - if isinstance(seed, str): - seed = seed.encode("utf-8") - ps = base32.a2b(seed) - elif re.search(br'^v0-[0-9a-zA-Z]{52}$', server_id): - ps = base32.a2b(server_id[3:]) - else: - log.msg("unable to parse serverid '%(server_id)s as pubkey, " - "hashing it to get permutation-seed, " - "may not converge with other clients", - server_id=server_id, - facility="tahoe.storage_broker", - level=log.UNUSUAL, umid="qu86tw") - ps = hashlib.sha256(server_id).digest() - permutation_seed = ps - - assert server_id - long_description = server_id - if server_id.startswith(b"v0-"): - # remove v0- prefix from abbreviated name - short_description = server_id[3:3+8] - else: - short_description = server_id[:8] - nickname = ann.get("nickname", "") - + (nickname, permutation_seed, tubid, short_description, long_description) = _parse_announcement(server_id, furl.encode("utf-8"), ann) return cls( nickname=nickname, permutation_seed=permutation_seed, tubid=tubid, storage_server=storage_server, - furl=furl, + furl=furl.encode("utf-8"), short_description=short_description, long_description=long_description, ) @@ -685,6 +704,16 @@ def _storage_from_foolscap_plugin(node_config, config, announcement, get_rref): raise AnnouncementNotMatched() +def _available_space_from_version(version): + if version is None: + return None + protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict()) + available_space = protocol_v1_version.get(b'available-space') + if available_space is None: + available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None) + return available_space + + @implementer(IServer) class NativeStorageServer(service.MultiService): """I hold information about a storage server that we want to connect to. @@ -843,13 +872,7 @@ class NativeStorageServer(service.MultiService): def get_available_space(self): version = self.get_version() - if version is None: - return None - protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict()) - available_space = protocol_v1_version.get(b'available-space') - if available_space is None: - available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None) - return available_space + return _available_space_from_version(version) def start_connecting(self, trigger_cb): self._tub = self._tub_maker(self._handler_overrides) @@ -911,6 +934,164 @@ class NativeStorageServer(service.MultiService): # used when the broker wants us to hurry up self._reconnector.reset() + +@implementer(IServer) +class HTTPNativeStorageServer(service.MultiService): + """ + Like ``NativeStorageServer``, but for HTTP clients. + + The notion of being "connected" is less meaningful for HTTP; we just poll + occasionally, and if we've succeeded at last poll, we assume we're + "connected". 
+ """ + + def __init__(self, server_id: bytes, announcement, reactor=reactor): + service.MultiService.__init__(self) + assert isinstance(server_id, bytes) + self._server_id = server_id + self.announcement = announcement + self._on_status_changed = ObserverList() + self._reactor = reactor + furl = announcement["anonymous-storage-FURL"].encode("utf-8") + ( + self._nickname, + self._permutation_seed, + self._tubid, + self._short_description, + self._long_description + ) = _parse_announcement(server_id, furl, announcement) + # TODO need some way to do equivalent of Happy Eyeballs for multiple NURLs? + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3935 + nurl = DecodedURL.from_text(announcement[ANONYMOUS_STORAGE_NURLS][0]) + self._istorage_server = _HTTPStorageServer.from_http_client( + StorageClient.from_nurl(nurl, reactor) + ) + + self._connection_status = connection_status.ConnectionStatus.unstarted() + self._version = None + self._last_connect_time = None + self._connecting_deferred = None + + def get_permutation_seed(self): + return self._permutation_seed + + def get_name(self): + return self._short_description + + def get_longname(self): + return self._long_description + + def get_tubid(self): + return self._tubid + + def get_lease_seed(self): + # Apparently this is what Foolscap version above does?! + return self._tubid + + def get_foolscap_write_enabler_seed(self): + return self._tubid + + def get_nickname(self): + return self._nickname + + def on_status_changed(self, status_changed): + """ + :param status_changed: a callable taking a single arg (the + NativeStorageServer) that is notified when we become connected + """ + return self._on_status_changed.subscribe(status_changed) + + # Special methods used by copy.copy() and copy.deepcopy(). When those are + # used in allmydata.immutable.filenode to copy CheckResults during + # repair, we want it to treat the IServer instances as singletons, and + # not attempt to duplicate them.. + def __copy__(self): + return self + + def __deepcopy__(self, memodict): + return self + + def __repr__(self): + return "" % self.get_name() + + def get_serverid(self): + return self._server_id + + def get_version(self): + return self._version + + def get_announcement(self): + return self.announcement + + def get_connection_status(self): + return self._connection_status + + def is_connected(self): + return self._connection_status.connected + + def get_available_space(self): + version = self.get_version() + return _available_space_from_version(version) + + def start_connecting(self, trigger_cb): + self._lc = LoopingCall(self._connect) + self._lc.start(1, True) + + def _got_version(self, version): + self._last_connect_time = time.time() + self._version = version + self._connection_status = connection_status.ConnectionStatus( + True, "connected", [], self._last_connect_time, self._last_connect_time + ) + self._on_status_changed.notify(self) + + def _failed_to_connect(self, reason): + self._connection_status = connection_status.ConnectionStatus( + False, f"failure: {reason}", [], self._last_connect_time, self._last_connect_time + ) + self._on_status_changed.notify(self) + + def get_storage_server(self): + """ + See ``IServer.get_storage_server``. 
+ """ + if self._connection_status.summary == "unstarted": + return None + return self._istorage_server + + def stop_connecting(self): + self._lc.stop() + if self._connecting_deferred is not None: + self._connecting_deferred.cancel() + + def try_to_connect(self): + self._connect() + + def _connect(self): + result = self._istorage_server.get_version() + + def remove_connecting_deferred(result): + self._connecting_deferred = None + return result + + # Set a short timeout since we're relying on this for server liveness. + self._connecting_deferred = result.addTimeout(5, self._reactor).addBoth( + remove_connecting_deferred).addCallbacks( + self._got_version, + self._failed_to_connect + ) + + def stopService(self): + if self._connecting_deferred is not None: + self._connecting_deferred.cancel() + + result = service.MultiService.stopService(self) + if self._lc.running: + self._lc.stop() + self._failed_to_connect("shut down") + return result + + class UnknownServerTypeError(Exception): pass @@ -1017,10 +1198,256 @@ class _StorageServer(object): shnum, reason, ): - self._rref.callRemote( + return self._rref.callRemote( "advise_corrupt_share", share_type, storage_index, shnum, reason, ).addErrback(log.err, "Error from remote call to advise_corrupt_share") + + + +@attr.s(hash=True) +class _FakeRemoteReference(object): + """ + Emulate a Foolscap RemoteReference, calling a local object instead. + """ + local_object = attr.ib(type=object) + + @defer.inlineCallbacks + def callRemote(self, action, *args, **kwargs): + try: + result = yield getattr(self.local_object, action)(*args, **kwargs) + defer.returnValue(result) + except HTTPClientException as e: + raise RemoteException(e.args) + + +@attr.s +class _HTTPBucketWriter(object): + """ + Emulate a ``RIBucketWriter``, but use HTTP protocol underneath. + """ + client = attr.ib(type=StorageClientImmutables) + storage_index = attr.ib(type=bytes) + share_number = attr.ib(type=int) + upload_secret = attr.ib(type=bytes) + finished = attr.ib(type=defer.Deferred[bool], factory=defer.Deferred) + + def abort(self): + return self.client.abort_upload(self.storage_index, self.share_number, + self.upload_secret) + + @defer.inlineCallbacks + def write(self, offset, data): + result = yield self.client.write_share_chunk( + self.storage_index, self.share_number, self.upload_secret, offset, data + ) + if result.finished: + self.finished.callback(True) + defer.returnValue(None) + + def close(self): + # We're not _really_ closed until all writes have succeeded and we + # finished writing all the data. + return self.finished + + +def _ignore_404(failure: Failure) -> Union[Failure, None]: + """ + Useful for advise_corrupt_share(), since it swallows unknown share numbers + in Foolscap. + """ + if failure.check(HTTPClientException) and failure.value.code == http.NOT_FOUND: + return None + else: + return failure + + +@attr.s(hash=True) +class _HTTPBucketReader(object): + """ + Emulate a ``RIBucketReader``, but use HTTP protocol underneath. + """ + client = attr.ib(type=StorageClientImmutables) + storage_index = attr.ib(type=bytes) + share_number = attr.ib(type=int) + + def read(self, offset, length): + return self.client.read_share_chunk( + self.storage_index, self.share_number, offset, length + ) + + def advise_corrupt_share(self, reason): + return self.client.advise_corrupt_share( + self.storage_index, self.share_number, + str(reason, "utf-8", errors="backslashreplace") + ).addErrback(_ignore_404) + + +# WORK IN PROGRESS, for now it doesn't actually implement whole thing. 
+@implementer(IStorageServer) # type: ignore +@attr.s +class _HTTPStorageServer(object): + """ + Talk to remote storage server over HTTP. + """ + _http_client = attr.ib(type=StorageClient) + + @staticmethod + def from_http_client(http_client): # type: (StorageClient) -> _HTTPStorageServer + """ + Create an ``IStorageServer`` from a HTTP ``StorageClient``. + """ + return _HTTPStorageServer(http_client=http_client) + + def get_version(self): + return StorageClientGeneral(self._http_client).get_version() + + @defer.inlineCallbacks + def allocate_buckets( + self, + storage_index, + renew_secret, + cancel_secret, + sharenums, + allocated_size, + canary + ): + upload_secret = urandom(20) + immutable_client = StorageClientImmutables(self._http_client) + result = immutable_client.create( + storage_index, sharenums, allocated_size, upload_secret, renew_secret, + cancel_secret + ) + result = yield result + defer.returnValue( + (result.already_have, { + share_num: _FakeRemoteReference(_HTTPBucketWriter( + client=immutable_client, + storage_index=storage_index, + share_number=share_num, + upload_secret=upload_secret + )) + for share_num in result.allocated + }) + ) + + @defer.inlineCallbacks + def get_buckets( + self, + storage_index + ): + immutable_client = StorageClientImmutables(self._http_client) + share_numbers = yield immutable_client.list_shares( + storage_index + ) + defer.returnValue({ + share_num: _FakeRemoteReference(_HTTPBucketReader( + immutable_client, storage_index, share_num + )) + for share_num in share_numbers + }) + + @async_to_deferred + async def add_lease( + self, + storage_index, + renew_secret, + cancel_secret + ): + client = StorageClientGeneral(self._http_client) + try: + await client.add_or_renew_lease( + storage_index, renew_secret, cancel_secret + ) + except ClientException as e: + if e.code == http.NOT_FOUND: + # Silently do nothing, as is the case for the Foolscap client + return + raise + + def advise_corrupt_share( + self, + share_type, + storage_index, + shnum, + reason: bytes + ): + if share_type == b"immutable": + client : Union[StorageClientImmutables, StorageClientMutables] = StorageClientImmutables(self._http_client) + elif share_type == b"mutable": + client = StorageClientMutables(self._http_client) + else: + raise ValueError("Unknown share type") + return client.advise_corrupt_share( + storage_index, shnum, str(reason, "utf-8", errors="backslashreplace") + ).addErrback(_ignore_404) + + @defer.inlineCallbacks + def slot_readv(self, storage_index, shares, readv): + mutable_client = StorageClientMutables(self._http_client) + pending_reads = {} + reads = {} + # If shares list is empty, that means list all shares, so we need + # to do a query to get that. 
+ if not shares: + shares = yield mutable_client.list_shares(storage_index) + + # Start all the queries in parallel: + for share_number in shares: + share_reads = defer.gatherResults( + [ + mutable_client.read_share_chunk( + storage_index, share_number, offset, length + ) + for (offset, length) in readv + ] + ) + pending_reads[share_number] = share_reads + + # Wait for all the queries to finish: + for share_number, pending_result in pending_reads.items(): + reads[share_number] = yield pending_result + + return reads + + @defer.inlineCallbacks + def slot_testv_and_readv_and_writev( + self, + storage_index, + secrets, + tw_vectors, + r_vector, + ): + mutable_client = StorageClientMutables(self._http_client) + we_secret, lr_secret, lc_secret = secrets + client_tw_vectors = {} + for share_num, (test_vector, data_vector, new_length) in tw_vectors.items(): + client_test_vectors = [ + TestVector(offset=offset, size=size, specimen=specimen) + for (offset, size, specimen) in test_vector + ] + client_write_vectors = [ + WriteVector(offset=offset, data=data) for (offset, data) in data_vector + ] + client_tw_vectors[share_num] = TestWriteVectors( + test_vectors=client_test_vectors, + write_vectors=client_write_vectors, + new_length=new_length + ) + client_read_vectors = [ + ReadVector(offset=offset, size=size) + for (offset, size) in r_vector + ] + try: + client_result = yield mutable_client.read_test_write_chunks( + storage_index, we_secret, lr_secret, lc_secret, client_tw_vectors, + client_read_vectors, + ) + except ClientException as e: + if e.code == http.UNAUTHORIZED: + raise RemoteException("Unauthorized write, possibly you passed the wrong write enabler?") + raise + return (client_result.success, client_result.reads) diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index 893aa15ce..ad245ca77 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -125,5 +125,5 @@ if sys.platform == "win32": initialize() from eliot import to_file -from allmydata.util.jsonbytes import AnyBytesJSONEncoder -to_file(open("eliot.log", "wb"), encoder=AnyBytesJSONEncoder) +from allmydata.util.eliotutil import eliot_json_encoder +to_file(open("eliot.log", "wb"), encoder=eliot_json_encoder) diff --git a/src/allmydata/test/certs.py b/src/allmydata/test/certs.py new file mode 100644 index 000000000..9e6640386 --- /dev/null +++ b/src/allmydata/test/certs.py @@ -0,0 +1,66 @@ +"""Utilities for generating TLS certificates.""" + +import datetime + +from cryptography import x509 +from cryptography.x509.oid import NameOID +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives import serialization, hashes + +from twisted.python.filepath import FilePath + + +def cert_to_file(path: FilePath, cert) -> FilePath: + """ + Write the given certificate to a file on disk. Returns the path. + """ + path.setContent(cert.public_bytes(serialization.Encoding.PEM)) + return path + + +def private_key_to_file(path: FilePath, private_key) -> FilePath: + """ + Write the given key to a file on disk. Returns the path. 
+ """ + path.setContent( + private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + ) + return path + + +def generate_private_key(): + """Create a RSA private key.""" + return rsa.generate_private_key(public_exponent=65537, key_size=2048) + + +def generate_certificate( + private_key, + expires_days: int = 10, + valid_in_days: int = 0, + org_name: str = "Yoyodyne", +): + """Generate a certificate from a RSA private key.""" + subject = issuer = x509.Name( + [x509.NameAttribute(NameOID.ORGANIZATION_NAME, org_name)] + ) + starts = datetime.datetime.utcnow() + datetime.timedelta(days=valid_in_days) + expires = datetime.datetime.utcnow() + datetime.timedelta(days=expires_days) + return ( + x509.CertificateBuilder() + .subject_name(subject) + .issuer_name(issuer) + .public_key(private_key.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(min(starts, expires)) + .not_valid_after(expires) + .add_extension( + x509.SubjectAlternativeName([x509.DNSName("localhost")]), + critical=False, + # Sign our certificate with our private key + ) + .sign(private_key, hashes.SHA256()) + ) diff --git a/src/allmydata/test/cli/test_admin.py b/src/allmydata/test/cli/test_admin.py new file mode 100644 index 000000000..082904652 --- /dev/null +++ b/src/allmydata/test/cli/test_admin.py @@ -0,0 +1,87 @@ +""" +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +from six.moves import StringIO + +from testtools.matchers import ( + Contains, +) + +from twisted.python.filepath import ( + FilePath, +) + +from allmydata.scripts.admin import ( + migrate_crawler, +) +from allmydata.scripts.runner import ( + Options, +) +from ..common import ( + SyncTestCase, +) + +class AdminMigrateCrawler(SyncTestCase): + """ + Tests related to 'tahoe admin migrate-crawler' + """ + + def test_already(self): + """ + We've already migrated; don't do it again. + """ + + root = FilePath(self.mktemp()) + storage = root.child("storage") + storage.makedirs() + with storage.child("lease_checker.state.json").open("w") as f: + f.write(b"{}\n") + + top = Options() + top.parseOptions([ + "admin", "migrate-crawler", + "--basedir", storage.parent().path, + ]) + options = top.subOptions + while hasattr(options, "subOptions"): + options = options.subOptions + options.stdout = StringIO() + migrate_crawler(options) + + self.assertThat( + options.stdout.getvalue(), + Contains("Already converted:"), + ) + + def test_usage(self): + """ + We've already migrated; don't do it again. 
+ """ + + root = FilePath(self.mktemp()) + storage = root.child("storage") + storage.makedirs() + with storage.child("lease_checker.state.json").open("w") as f: + f.write(b"{}\n") + + top = Options() + top.parseOptions([ + "admin", "migrate-crawler", + "--basedir", storage.parent().path, + ]) + options = top.subOptions + while hasattr(options, "subOptions"): + options = options.subOptions + self.assertThat( + str(options), + Contains("security issues with pickle") + ) diff --git a/src/allmydata/test/cli/test_create.py b/src/allmydata/test/cli/test_create.py index 282f26163..609888fb3 100644 --- a/src/allmydata/test/cli/test_create.py +++ b/src/allmydata/test/cli/test_create.py @@ -11,16 +11,24 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import os -import mock + +try: + from typing import Any, List, Tuple +except ImportError: + pass + from twisted.trial import unittest from twisted.internet import defer, reactor from twisted.python import usage from allmydata.util import configutil +from allmydata.util import tor_provider, i2p_provider from ..common_util import run_cli, parse_cli +from ..common import ( + disable_modules, +) from ...scripts import create_node from ... import client - def read_config(basedir): tahoe_cfg = os.path.join(basedir, "tahoe.cfg") config = configutil.get_config(tahoe_cfg) @@ -105,11 +113,12 @@ class Config(unittest.TestCase): @defer.inlineCallbacks def test_client_hide_ip_no_i2p_txtorcon(self): - # hmm, I must be doing something weird, these don't work as - # @mock.patch decorators for some reason - txi2p = mock.patch('allmydata.util.i2p_provider._import_txi2p', return_value=None) - txtorcon = mock.patch('allmydata.util.tor_provider._import_txtorcon', return_value=None) - with txi2p, txtorcon: + """ + The ``create-client`` sub-command tells the user to install the necessary + dependencies if they have neither tor nor i2p support installed and + they request network location privacy with the ``--hide-ip`` flag. 
+ """ + with disable_modules("txi2p", "txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", "--hide-ip", basedir) self.assertTrue(rc != 0, out) @@ -118,8 +127,7 @@ class Config(unittest.TestCase): @defer.inlineCallbacks def test_client_i2p_option_no_txi2p(self): - txi2p = mock.patch('allmydata.util.i2p_provider._import_txi2p', return_value=None) - with txi2p: + with disable_modules("txi2p"): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--listen=i2p", "--i2p-launch", basedir) self.assertTrue(rc != 0) @@ -127,8 +135,7 @@ class Config(unittest.TestCase): @defer.inlineCallbacks def test_client_tor_option_no_txtorcon(self): - txtorcon = mock.patch('allmydata.util.tor_provider._import_txtorcon', return_value=None) - with txtorcon: + with disable_modules("txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--listen=tor", "--tor-launch", basedir) self.assertTrue(rc != 0) @@ -145,9 +152,7 @@ class Config(unittest.TestCase): @defer.inlineCallbacks def test_client_hide_ip_no_txtorcon(self): - txtorcon = mock.patch('allmydata.util.tor_provider._import_txtorcon', - return_value=None) - with txtorcon: + with disable_modules("txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", "--hide-ip", basedir) self.assertEqual(0, rc) @@ -295,11 +300,10 @@ class Config(unittest.TestCase): def test_node_slow_tor(self): basedir = self.mktemp() d = defer.Deferred() - with mock.patch("allmydata.util.tor_provider.create_config", - return_value=d): - d2 = run_cli("create-node", "--listen=tor", basedir) - d.callback(({}, "port", "location")) - rc, out, err = yield d2 + self.patch(tor_provider, "create_config", lambda *a, **kw: d) + d2 = run_cli("create-node", "--listen=tor", basedir) + d.callback(({}, "port", "location")) + rc, out, err = yield d2 self.assertEqual(rc, 0) self.assertIn("Node created", out) self.assertEqual(err, "") @@ -308,11 +312,10 @@ class Config(unittest.TestCase): def test_node_slow_i2p(self): basedir = self.mktemp() d = defer.Deferred() - with mock.patch("allmydata.util.i2p_provider.create_config", - return_value=d): - d2 = run_cli("create-node", "--listen=i2p", basedir) - d.callback(({}, "port", "location")) - rc, out, err = yield d2 + self.patch(i2p_provider, "create_config", lambda *a, **kw: d) + d2 = run_cli("create-node", "--listen=i2p", basedir) + d.callback(({}, "port", "location")) + rc, out, err = yield d2 self.assertEqual(rc, 0) self.assertIn("Node created", out) self.assertEqual(err, "") @@ -353,6 +356,27 @@ class Config(unittest.TestCase): self.assertIn("is not empty", err) self.assertIn("To avoid clobbering anything, I am going to quit now", err) +def fake_config(testcase, module, result): + # type: (unittest.TestCase, Any, Any) -> List[Tuple] + """ + Monkey-patch a fake configuration function into the given module. + + :param testcase: The test case to use to do the monkey-patching. + + :param module: The module into which to patch the fake function. + + :param result: The return value for the fake function. + + :return: A list of tuples of the arguments the fake function was called + with. 
+ """ + calls = [] + def fake_config(reactor, cli_config): + calls.append((reactor, cli_config)) + return result + testcase.patch(module, "create_config", fake_config) + return calls + class Tor(unittest.TestCase): def test_default(self): basedir = self.mktemp() @@ -360,12 +384,14 @@ class Tor(unittest.TestCase): tor_port = "ghi" tor_location = "jkl" config_d = defer.succeed( (tor_config, tor_port, tor_location) ) - with mock.patch("allmydata.util.tor_provider.create_config", - return_value=config_d) as co: - rc, out, err = self.successResultOf( - run_cli("create-node", "--listen=tor", basedir)) - self.assertEqual(len(co.mock_calls), 1) - args = co.mock_calls[0][1] + + calls = fake_config(self, tor_provider, config_d) + rc, out, err = self.successResultOf( + run_cli("create-node", "--listen=tor", basedir), + ) + + self.assertEqual(len(calls), 1) + args = calls[0] self.assertIdentical(args[0], reactor) self.assertIsInstance(args[1], create_node.CreateNodeOptions) self.assertEqual(args[1]["listen"], "tor") @@ -380,12 +406,15 @@ class Tor(unittest.TestCase): tor_port = "ghi" tor_location = "jkl" config_d = defer.succeed( (tor_config, tor_port, tor_location) ) - with mock.patch("allmydata.util.tor_provider.create_config", - return_value=config_d) as co: - rc, out, err = self.successResultOf( - run_cli("create-node", "--listen=tor", "--tor-launch", - basedir)) - args = co.mock_calls[0][1] + + calls = fake_config(self, tor_provider, config_d) + rc, out, err = self.successResultOf( + run_cli( + "create-node", "--listen=tor", "--tor-launch", + basedir, + ), + ) + args = calls[0] self.assertEqual(args[1]["listen"], "tor") self.assertEqual(args[1]["tor-launch"], True) self.assertEqual(args[1]["tor-control-port"], None) @@ -396,12 +425,15 @@ class Tor(unittest.TestCase): tor_port = "ghi" tor_location = "jkl" config_d = defer.succeed( (tor_config, tor_port, tor_location) ) - with mock.patch("allmydata.util.tor_provider.create_config", - return_value=config_d) as co: - rc, out, err = self.successResultOf( - run_cli("create-node", "--listen=tor", "--tor-control-port=mno", - basedir)) - args = co.mock_calls[0][1] + + calls = fake_config(self, tor_provider, config_d) + rc, out, err = self.successResultOf( + run_cli( + "create-node", "--listen=tor", "--tor-control-port=mno", + basedir, + ), + ) + args = calls[0] self.assertEqual(args[1]["listen"], "tor") self.assertEqual(args[1]["tor-launch"], False) self.assertEqual(args[1]["tor-control-port"], "mno") @@ -434,12 +466,13 @@ class I2P(unittest.TestCase): i2p_port = "ghi" i2p_location = "jkl" dest_d = defer.succeed( (i2p_config, i2p_port, i2p_location) ) - with mock.patch("allmydata.util.i2p_provider.create_config", - return_value=dest_d) as co: - rc, out, err = self.successResultOf( - run_cli("create-node", "--listen=i2p", basedir)) - self.assertEqual(len(co.mock_calls), 1) - args = co.mock_calls[0][1] + + calls = fake_config(self, i2p_provider, dest_d) + rc, out, err = self.successResultOf( + run_cli("create-node", "--listen=i2p", basedir), + ) + self.assertEqual(len(calls), 1) + args = calls[0] self.assertIdentical(args[0], reactor) self.assertIsInstance(args[1], create_node.CreateNodeOptions) self.assertEqual(args[1]["listen"], "i2p") @@ -461,12 +494,15 @@ class I2P(unittest.TestCase): i2p_port = "ghi" i2p_location = "jkl" dest_d = defer.succeed( (i2p_config, i2p_port, i2p_location) ) - with mock.patch("allmydata.util.i2p_provider.create_config", - return_value=dest_d) as co: - rc, out, err = self.successResultOf( - run_cli("create-node", "--listen=i2p", 
"--i2p-sam-port=mno", - basedir)) - args = co.mock_calls[0][1] + + calls = fake_config(self, i2p_provider, dest_d) + rc, out, err = self.successResultOf( + run_cli( + "create-node", "--listen=i2p", "--i2p-sam-port=mno", + basedir, + ), + ) + args = calls[0] self.assertEqual(args[1]["listen"], "i2p") self.assertEqual(args[1]["i2p-launch"], False) self.assertEqual(args[1]["i2p-sam-port"], "mno") diff --git a/src/allmydata/test/cli/test_invite.py b/src/allmydata/test/cli/test_invite.py index 20d012995..07756eeed 100644 --- a/src/allmydata/test/cli/test_invite.py +++ b/src/allmydata/test/cli/test_invite.py @@ -1,75 +1,117 @@ """ -Ported to Pythn 3. +Tests for ``tahoe invite``. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals -from future.utils import PY2 -if PY2: - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +from __future__ import annotations -import os -import mock import json +import os +from functools import partial from os.path import join +from typing import Awaitable, Callable, Optional, Sequence, TypeVar, Union -try: - from typing import Optional, Sequence -except ImportError: - pass - -from twisted.trial import unittest from twisted.internet import defer +from twisted.trial import unittest + +from ...client import read_config +from ...scripts import runner +from ...util.jsonbytes import dumps_bytes from ..common_util import run_cli from ..no_network import GridTestMixin from .common import CLITestMixin -from ...client import ( - read_config, -) +from .wormholetesting import IWormhole, MemoryWormholeServer, TestingHelper, memory_server -class _FakeWormhole(object): +# Logically: +# JSONable = dict[str, Union[JSONable, None, int, float, str, list[JSONable]]] +# +# But practically: +JSONable = Union[dict, None, int, float, str, list] - def __init__(self, outgoing_messages): - self.messages = [] - for o in outgoing_messages: - assert isinstance(o, bytes) - self._outgoing = outgoing_messages - def get_code(self): - return defer.succeed(u"6-alarmist-tuba") +async def open_wormhole() -> tuple[Callable, IWormhole, str]: + """ + Create a new in-memory wormhole server, open one end of a wormhole, and + return it and related info. - def set_code(self, code): - self._code = code + :return: A three-tuple allowing use of the wormhole. The first element is + a callable like ``run_cli`` but which will run commands so that they + use the in-memory wormhole server instead of a real one. The second + element is the open wormhole. The third element is the wormhole's + code. + """ + server = MemoryWormholeServer() + options = runner.Options() + options.wormhole = server + reactor = object() - def get_welcome(self): - return defer.succeed( - { - u"welcome": {}, - } + wormhole = server.create( + "tahoe-lafs.org/invite", + "ws://wormhole.tahoe-lafs.org:4000/v1", + reactor, + ) + code = await wormhole.get_code() + + return (partial(run_cli, options=options), wormhole, code) + + +def make_simple_peer( + reactor, + server: MemoryWormholeServer, + helper: TestingHelper, + messages: Sequence[JSONable], +) -> Callable[[], Awaitable[IWormhole]]: + """ + Make a wormhole peer that just sends the given messages. + + The returned function returns an awaitable that fires with the peer's end + of the wormhole. 
+ """ + async def peer() -> IWormhole: + # Run the client side of the invitation by manually pumping a + # message through the wormhole. + + # First, wait for the server to create the wormhole at all. + wormhole = await helper.wait_for_wormhole( + "tahoe-lafs.org/invite", + "ws://wormhole.tahoe-lafs.org:4000/v1", ) + # Then read out its code and open the other side of the wormhole. + code = await wormhole.when_code() + other_end = server.create( + "tahoe-lafs.org/invite", + "ws://wormhole.tahoe-lafs.org:4000/v1", + reactor, + ) + other_end.set_code(code) + send_messages(other_end, messages) + return other_end - def allocate_code(self): - return None - - def send_message(self, msg): - assert isinstance(msg, bytes) - self.messages.append(msg) - - def get_message(self): - return defer.succeed(self._outgoing.pop(0)) - - def close(self): - return defer.succeed(None) + return peer -def _create_fake_wormhole(outgoing_messages): - outgoing_messages = [ - m.encode("utf-8") if isinstance(m, str) else m - for m in outgoing_messages - ] - return _FakeWormhole(outgoing_messages) +def send_messages(wormhole: IWormhole, messages: Sequence[JSONable]) -> None: + """ + Send a list of message through a wormhole. + """ + for msg in messages: + wormhole.send_message(dumps_bytes(msg)) + + +A = TypeVar("A") +B = TypeVar("B") + +def concurrently( + client: Callable[[], Awaitable[A]], + server: Callable[[], Awaitable[B]], +) -> defer.Deferred[tuple[A, B]]: + """ + Run two asynchronous functions concurrently and asynchronously return a + tuple of both their results. + """ + return defer.gatherResults([ + defer.Deferred.fromCoroutine(client()), + defer.Deferred.fromCoroutine(server()), + ]) class Join(GridTestMixin, CLITestMixin, unittest.TestCase): @@ -86,41 +128,39 @@ class Join(GridTestMixin, CLITestMixin, unittest.TestCase): successfully join after an invite """ node_dir = self.mktemp() + run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) + send_messages(wormhole, [ + {u"abilities": {u"server-v1": {}}}, + { + u"shares-needed": 1, + u"shares-happy": 1, + u"shares-total": 1, + u"nickname": u"somethinghopefullyunique", + u"introducer": u"pb://foo", + }, + ]) - with mock.patch('allmydata.scripts.create_node.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"server-v1": {}}}), - json.dumps({ - u"shares-needed": 1, - u"shares-happy": 1, - u"shares-total": 1, - u"nickname": u"somethinghopefullyunique", - u"introducer": u"pb://foo", - }), - ]) - w.create = mock.Mock(return_value=fake_wh) + rc, out, err = yield run_cli( + "create-client", + "--join", code, + node_dir, + ) - rc, out, err = yield run_cli( - "create-client", - "--join", "1-abysmal-ant", - node_dir, - ) + self.assertEqual(0, rc) - self.assertEqual(0, rc) + config = read_config(node_dir, u"") + self.assertIn( + "pb://foo", + set( + furl + for (furl, cache) + in config.get_introducer_configuration().values() + ), + ) - config = read_config(node_dir, u"") - self.assertIn( - "pb://foo", - set( - furl - for (furl, cache) - in config.get_introducer_configuration().values() - ), - ) - - with open(join(node_dir, 'tahoe.cfg'), 'r') as f: - config = f.read() - self.assertIn(u"somethinghopefullyunique", config) + with open(join(node_dir, 'tahoe.cfg'), 'r') as f: + config = f.read() + self.assertIn(u"somethinghopefullyunique", config) @defer.inlineCallbacks def test_create_node_illegal_option(self): @@ -128,30 +168,28 @@ class Join(GridTestMixin, CLITestMixin, unittest.TestCase): Server sends JSON with 
unknown/illegal key """ node_dir = self.mktemp() + run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) + send_messages(wormhole, [ + {u"abilities": {u"server-v1": {}}}, + { + u"shares-needed": 1, + u"shares-happy": 1, + u"shares-total": 1, + u"nickname": u"somethinghopefullyunique", + u"introducer": u"pb://foo", + u"something-else": u"not allowed", + }, + ]) - with mock.patch('allmydata.scripts.create_node.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"server-v1": {}}}), - json.dumps({ - u"shares-needed": 1, - u"shares-happy": 1, - u"shares-total": 1, - u"nickname": u"somethinghopefullyunique", - u"introducer": u"pb://foo", - u"something-else": u"not allowed", - }), - ]) - w.create = mock.Mock(return_value=fake_wh) + rc, out, err = yield run_cli( + "create-client", + "--join", code, + node_dir, + ) - rc, out, err = yield run_cli( - "create-client", - "--join", "1-abysmal-ant", - node_dir, - ) - - # should still succeed -- just ignores the not-whitelisted - # "something-else" option - self.assertEqual(0, rc) + # should still succeed -- just ignores the not-whitelisted + # "something-else" option + self.assertEqual(0, rc) class Invite(GridTestMixin, CLITestMixin, unittest.TestCase): @@ -168,8 +206,7 @@ class Invite(GridTestMixin, CLITestMixin, unittest.TestCase): intro_dir, ) - def _invite_success(self, extra_args=(), tahoe_config=None): - # type: (Sequence[bytes], Optional[bytes]) -> defer.Deferred + async def _invite_success(self, extra_args: Sequence[bytes] = (), tahoe_config: Optional[bytes] = None) -> str: """ Exercise an expected-success case of ``tahoe invite``. @@ -190,53 +227,58 @@ class Invite(GridTestMixin, CLITestMixin, unittest.TestCase): with open(join(intro_dir, "tahoe.cfg"), "wb") as fobj_cfg: fobj_cfg.write(tahoe_config) - with mock.patch('allmydata.scripts.tahoe_invite.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"client-v1": {}}}), - ]) - w.create = mock.Mock(return_value=fake_wh) + wormhole_server, helper = memory_server() + options = runner.Options() + options.wormhole = wormhole_server + reactor = object() - extra_args = tuple(extra_args) - - d = run_cli( + async def server(): + # Run the server side of the invitation process using the CLI. + rc, out, err = await run_cli( "-d", intro_dir, "invite", - *(extra_args + ("foo",)) + *tuple(extra_args) + ("foo",), + options=options, ) - def done(result): - rc, out, err = result - self.assertEqual(2, len(fake_wh.messages)) - self.assertEqual( - json.loads(fake_wh.messages[0]), - { - "abilities": - { - "server-v1": {} - }, - }, - ) - invite = json.loads(fake_wh.messages[1]) - self.assertEqual( - invite["nickname"], "foo", - ) - self.assertEqual( - invite["introducer"], "pb://fooblam", - ) - return invite - d.addCallback(done) - return d + # Send a proper client abilities message. + client = make_simple_peer(reactor, wormhole_server, helper, [{u"abilities": {u"client-v1": {}}}]) + other_end, _ = await concurrently(client, server) + + # Check the server's messages. First, it should announce its + # abilities correctly. + server_abilities = json.loads(await other_end.when_received()) + self.assertEqual( + server_abilities, + { + "abilities": + { + "server-v1": {} + }, + }, + ) + + # Second, it should have an invitation with a nickname and introducer + # furl. 
+ invite = json.loads(await other_end.when_received()) + self.assertEqual( + invite["nickname"], "foo", + ) + self.assertEqual( + invite["introducer"], "pb://fooblam", + ) + return invite @defer.inlineCallbacks def test_invite_success(self): """ successfully send an invite """ - invite = yield self._invite_success(( + invite = yield defer.Deferred.fromCoroutine(self._invite_success(( "--shares-needed", "1", "--shares-happy", "2", "--shares-total", "3", - )) + ))) self.assertEqual( invite["shares-needed"], "1", ) @@ -253,12 +295,12 @@ class Invite(GridTestMixin, CLITestMixin, unittest.TestCase): If ``--shares-{needed,happy,total}`` are not given on the command line then the invitation is generated using the configured values. """ - invite = yield self._invite_success(tahoe_config=b""" + invite = yield defer.Deferred.fromCoroutine(self._invite_success(tahoe_config=b""" [client] shares.needed = 2 shares.happy = 4 shares.total = 6 -""") +""")) self.assertEqual( invite["shares-needed"], "2", ) @@ -277,22 +319,20 @@ shares.total = 6 """ intro_dir = os.path.join(self.basedir, "introducer") - with mock.patch('allmydata.scripts.tahoe_invite.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"client-v1": {}}}), - ]) - w.create = mock.Mock(return_value=fake_wh) + options = runner.Options() + options.wormhole = None - rc, out, err = yield run_cli( - "-d", intro_dir, - "invite", - "--shares-needed", "1", - "--shares-happy", "1", - "--shares-total", "1", - "foo", - ) - self.assertNotEqual(rc, 0) - self.assertIn(u"Can't find introducer FURL", out + err) + rc, out, err = yield run_cli( + "-d", intro_dir, + "invite", + "--shares-needed", "1", + "--shares-happy", "1", + "--shares-total", "1", + "foo", + options=options, + ) + self.assertNotEqual(rc, 0) + self.assertIn(u"Can't find introducer FURL", out + err) @defer.inlineCallbacks def test_invite_wrong_client_abilities(self): @@ -306,23 +346,28 @@ shares.total = 6 with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") - with mock.patch('allmydata.scripts.tahoe_invite.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"client-v9000": {}}}), - ]) - w.create = mock.Mock(return_value=fake_wh) + wormhole_server, helper = memory_server() + options = runner.Options() + options.wormhole = wormhole_server + reactor = object() - rc, out, err = yield run_cli( + async def server(): + rc, out, err = await run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", "foo", + options=options, ) self.assertNotEqual(rc, 0) self.assertIn(u"No 'client-v1' in abilities", out + err) + # Send some surprising client abilities. + client = make_simple_peer(reactor, wormhole_server, helper, [{u"abilities": {u"client-v9000": {}}}]) + yield concurrently(client, server) + @defer.inlineCallbacks def test_invite_no_client_abilities(self): """ @@ -335,23 +380,30 @@ shares.total = 6 with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") - with mock.patch('allmydata.scripts.tahoe_invite.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({}), - ]) - w.create = mock.Mock(return_value=fake_wh) + wormhole_server, helper = memory_server() + options = runner.Options() + options.wormhole = wormhole_server + reactor = object() - rc, out, err = yield run_cli( + async def server(): + # Run the server side of the invitation process using the CLI. 
+ rc, out, err = await run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", "foo", + options=options, ) self.assertNotEqual(rc, 0) self.assertIn(u"No 'abilities' from client", out + err) + # Send a no-abilities message through to the server. + client = make_simple_peer(reactor, wormhole_server, helper, [{}]) + yield concurrently(client, server) + + @defer.inlineCallbacks def test_invite_wrong_server_abilities(self): """ @@ -364,26 +416,25 @@ shares.total = 6 with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") - with mock.patch('allmydata.scripts.create_node.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({u"abilities": {u"server-v9000": {}}}), - json.dumps({ - "shares-needed": "1", - "shares-total": "1", - "shares-happy": "1", - "nickname": "foo", - "introducer": "pb://fooblam", - }), - ]) - w.create = mock.Mock(return_value=fake_wh) + run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) + send_messages(wormhole, [ + {u"abilities": {u"server-v9000": {}}}, + { + "shares-needed": "1", + "shares-total": "1", + "shares-happy": "1", + "nickname": "foo", + "introducer": "pb://fooblam", + }, + ]) - rc, out, err = yield run_cli( - "create-client", - "--join", "1-alarmist-tuba", - "foo", - ) - self.assertNotEqual(rc, 0) - self.assertIn("Expected 'server-v1' in server abilities", out + err) + rc, out, err = yield run_cli( + "create-client", + "--join", code, + "foo", + ) + self.assertNotEqual(rc, 0) + self.assertIn("Expected 'server-v1' in server abilities", out + err) @defer.inlineCallbacks def test_invite_no_server_abilities(self): @@ -397,26 +448,25 @@ shares.total = 6 with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") - with mock.patch('allmydata.scripts.create_node.wormhole') as w: - fake_wh = _create_fake_wormhole([ - json.dumps({}), - json.dumps({ - "shares-needed": "1", - "shares-total": "1", - "shares-happy": "1", - "nickname": "bar", - "introducer": "pb://fooblam", - }), - ]) - w.create = mock.Mock(return_value=fake_wh) + run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) + send_messages(wormhole, [ + {}, + { + "shares-needed": "1", + "shares-total": "1", + "shares-happy": "1", + "nickname": "bar", + "introducer": "pb://fooblam", + }, + ]) - rc, out, err = yield run_cli( - "create-client", - "--join", "1-alarmist-tuba", - "bar", - ) - self.assertNotEqual(rc, 0) - self.assertIn("Expected 'abilities' in server introduction", out + err) + rc, out, err = yield run_cli( + "create-client", + "--join", code, + "bar", + ) + self.assertNotEqual(rc, 0) + self.assertIn("Expected 'abilities' in server introduction", out + err) @defer.inlineCallbacks def test_invite_no_nick(self): @@ -425,13 +475,16 @@ shares.total = 6 """ intro_dir = os.path.join(self.basedir, "introducer") - with mock.patch('allmydata.scripts.tahoe_invite.wormhole'): - rc, out, err = yield run_cli( - "-d", intro_dir, - "invite", - "--shares-needed", "1", - "--shares-happy", "1", - "--shares-total", "1", - ) - self.assertTrue(rc) - self.assertIn(u"Provide a single argument", out + err) + options = runner.Options() + options.wormhole = None + + rc, out, err = yield run_cli( + "-d", intro_dir, + "invite", + "--shares-needed", "1", + "--shares-happy", "1", + "--shares-total", "1", + options=options, + ) + self.assertTrue(rc) + self.assertIn(u"Provide a single argument", out + err) diff --git a/src/allmydata/test/cli/test_run.py 
b/src/allmydata/test/cli/test_run.py index 28613e8c1..e84f52096 100644 --- a/src/allmydata/test/cli/test_run.py +++ b/src/allmydata/test/cli/test_run.py @@ -12,23 +12,19 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +import re from six.moves import ( StringIO, ) -from testtools import ( - skipIf, -) +from hypothesis.strategies import text +from hypothesis import given, assume from testtools.matchers import ( Contains, Equals, - HasLength, ) -from twisted.python.runtime import ( - platform, -) from twisted.python.filepath import ( FilePath, ) @@ -44,6 +40,10 @@ from ...scripts.tahoe_run import ( RunOptions, run, ) +from ...util.pid import ( + check_pid_process, + InvalidPidFile, +) from ...scripts.runner import ( parse_options @@ -151,7 +151,7 @@ class RunTests(SyncTestCase): """ Tests for ``run``. """ - @skipIf(platform.isWindows(), "There are no PID files on Windows.") + def test_non_numeric_pid(self): """ If the pidfile exists but does not contain a numeric value, a complaint to @@ -159,7 +159,7 @@ class RunTests(SyncTestCase): """ basedir = FilePath(self.mktemp()).asTextMode() basedir.makedirs() - basedir.child(u"twistd.pid").setContent(b"foo") + basedir.child(u"running.process").setContent(b"foo") basedir.child(u"tahoe-client.tac").setContent(b"") config = RunOptions() @@ -168,17 +168,30 @@ class RunTests(SyncTestCase): config['basedir'] = basedir.path config.twistd_args = [] + reactor = MemoryReactor() + runs = [] - result_code = run(config, runApp=runs.append) + result_code = run(reactor, config, runApp=runs.append) self.assertThat( config.stderr.getvalue(), Contains("found invalid PID file in"), ) - self.assertThat( - runs, - HasLength(1), - ) - self.assertThat( - result_code, - Equals(0), - ) + # because the pidfile is invalid we shouldn't get to the + # .run() call itself. + self.assertThat(runs, Equals([])) + self.assertThat(result_code, Equals(1)) + + good_file_content_re = re.compile(r"\w[0-9]*\w[0-9]*\w") + + @given(text()) + def test_pidfile_contents(self, content): + """ + invalid contents for a pidfile raise errors + """ + assume(not self.good_file_content_re.match(content)) + pidfile = FilePath("pidfile") + pidfile.setContent(content.encode("utf8")) + + with self.assertRaises(InvalidPidFile): + with check_pid_process(pidfile): + pass diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py new file mode 100644 index 000000000..744f9d75a --- /dev/null +++ b/src/allmydata/test/cli/wormholetesting.py @@ -0,0 +1,305 @@ +""" +An in-memory implementation of some of the magic-wormhole interfaces for +use by automated tests. + +For example:: + + async def peerA(mw): + wormhole = mw.create("myapp", "wss://myserver", reactor) + code = await wormhole.get_code() + print(f"I have a code: {code}") + message = await wormhole.when_received() + print(f"I have a message: {message}") + + async def local_peerB(helper, mw): + peerA_wormhole = await helper.wait_for_wormhole("myapp", "wss://myserver") + code = await peerA_wormhole.when_code() + + peerB_wormhole = mw.create("myapp", "wss://myserver") + peerB_wormhole.set_code(code) + + peerB_wormhole.send_message("Hello, peer A") + + # Run peerA against local_peerB with pure in-memory message passing. 
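+    # (``run`` and ``gather`` are illustrative stand-ins for whatever drives
+    # the coroutines; with Twisted, ``ensureDeferred`` and ``gatherResults``
+    # fill these roles.)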
+ server, helper = memory_server() + run(gather(peerA(server), local_peerB(helper, server))) + + # Run peerA against a peerB somewhere out in the world, using a real + # wormhole relay server somewhere. + import wormhole + run(peerA(wormhole)) +""" + +from __future__ import annotations + +from typing import Iterator, Optional, List, Tuple +from collections.abc import Awaitable +from inspect import getargspec +from itertools import count +from sys import stderr + +from attrs import frozen, define, field, Factory +from twisted.internet.defer import Deferred, DeferredQueue, succeed +from wormhole._interfaces import IWormhole +from wormhole.wormhole import create +from zope.interface import implementer + +WormholeCode = str +WormholeMessage = bytes +AppId = str +RelayURL = str +ApplicationKey = Tuple[RelayURL, AppId] + +@define +class MemoryWormholeServer(object): + """ + A factory for in-memory wormholes. + + :ivar _apps: Wormhole state arranged by the application id and relay URL + it belongs to. + + :ivar _waiters: Observers waiting for a wormhole to be created for a + specific application id and relay URL combination. + """ + _apps: dict[ApplicationKey, _WormholeApp] = field(default=Factory(dict)) + _waiters: dict[ApplicationKey, Deferred] = field(default=Factory(dict)) + + def create( + self, + appid, + relay_url, + reactor, + versions={}, + delegate=None, + journal=None, + tor=None, + timing=None, + stderr=stderr, + _eventual_queue=None, + _enable_dilate=False, + ): + """ + Create a wormhole. It will be able to connect to other wormholes created + by this instance (and constrained by the normal appid/relay_url + rules). + """ + if tor is not None: + raise ValueError("Cannot deal with Tor right now.") + if _enable_dilate: + raise ValueError("Cannot deal with dilation right now.") + + key = (relay_url, appid) + wormhole = _MemoryWormhole(self._view(key)) + if key in self._waiters: + self._waiters.pop(key).callback(wormhole) + return wormhole + + def _view(self, key: ApplicationKey) -> _WormholeServerView: + """ + Created a view onto this server's state that is limited by a certain + appid/relay_url pair. + """ + return _WormholeServerView(self, key) + + +@frozen +class TestingHelper(object): + """ + Provide extra functionality for interacting with an in-memory wormhole + implementation. + + This is intentionally a separate API so that it is not confused with + proper public interface of the real wormhole implementation. + """ + _server: MemoryWormholeServer + + async def wait_for_wormhole(self, appid: AppId, relay_url: RelayURL) -> IWormhole: + """ + Wait for a wormhole to appear at a specific location. + + :param appid: The appid that the resulting wormhole will have. + + :param relay_url: The URL of the relay at which the resulting wormhole + will presume to be created. + + :return: The first wormhole to be created which matches the given + parameters. + """ + key = (relay_url, appid) + if key in self._server._waiters: + raise ValueError(f"There is already a waiter for {key}") + d = Deferred() + self._server._waiters[key] = d + wormhole = await d + return wormhole + + +def _verify(): + """ + Roughly confirm that the in-memory wormhole creation function matches the + interface of the real implementation. + """ + # Poor man's interface verification. + + a = getargspec(create) + b = getargspec(MemoryWormholeServer.create) + # I know it has a `self` argument at the beginning. That's okay. 
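+    # Drop the leading ``self`` parameter so the remaining argument list can
+    # be compared directly against the real ``wormhole.create`` signature.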
+ b = b._replace(args=b.args[1:]) + assert a == b, "{} != {}".format(a, b) + + +_verify() + + +@define +class _WormholeApp(object): + """ + Represent a collection of wormholes that belong to the same + appid/relay_url scope. + """ + wormholes: dict[WormholeCode, IWormhole] = field(default=Factory(dict)) + _waiting: dict[WormholeCode, List[Deferred]] = field(default=Factory(dict)) + _counter: Iterator[int] = field(default=Factory(count)) + + def allocate_code(self, wormhole: IWormhole, code: Optional[WormholeCode]) -> WormholeCode: + """ + Allocate a new code for the given wormhole. + + This also associates the given wormhole with the code for future + lookup. + + Code generation logic is trivial and certainly not good enough for any + real use. It is sufficient for automated testing, though. + """ + if code is None: + code = "{}-persnickety-tardigrade".format(next(self._counter)) + self.wormholes.setdefault(code, []).append(wormhole) + try: + waiters = self._waiting.pop(code) + except KeyError: + pass + else: + for w in waiters: + w.callback(wormhole) + + return code + + def wait_for_wormhole(self, code: WormholeCode) -> Awaitable[_MemoryWormhole]: + """ + Return a ``Deferred`` which fires with the next wormhole to be associated + with the given code. This is used to let the first end of a wormhole + rendezvous with the second end. + """ + d = Deferred() + self._waiting.setdefault(code, []).append(d) + return d + + +@frozen +class _WormholeServerView(object): + """ + Present an interface onto the server to be consumed by individual + wormholes. + """ + _server: MemoryWormholeServer + _key: ApplicationKey + + def allocate_code(self, wormhole: _MemoryWormhole, code: Optional[WormholeCode]) -> WormholeCode: + """ + Allocate a new code for the given wormhole in the scope associated with + this view. + """ + app = self._server._apps.setdefault(self._key, _WormholeApp()) + return app.allocate_code(wormhole, code) + + def wormhole_by_code(self, code: WormholeCode, exclude: object) -> Deferred[IWormhole]: + """ + Retrieve all wormholes previously associated with a code. + """ + app = self._server._apps[self._key] + wormholes = app.wormholes[code] + try: + [wormhole] = list(wormhole for wormhole in wormholes if wormhole != exclude) + except ValueError: + return app.wait_for_wormhole(code) + return succeed(wormhole) + + +@implementer(IWormhole) +@define +class _MemoryWormhole(object): + """ + Represent one side of a wormhole as conceived by ``MemoryWormholeServer``. 
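+
+    Messages sent with ``send_message`` are queued on the sending side and are
+    delivered when the peer wormhole sharing the same code calls
+    ``when_received``.  A minimal sketch of typical use (``server`` is a
+    ``MemoryWormholeServer``; the reactor argument is unused by this
+    implementation)::
+
+        w1 = server.create("app", "relay", reactor)
+        w2 = server.create("app", "relay", reactor)
+        w1.allocate_code()
+        w2.set_code(await w1.when_code())
+        w1.send_message(b"hi")
+        assert await w2.when_received() == b"hi"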
+ """ + + _view: _WormholeServerView + _code: Optional[WormholeCode] = None + _payload: DeferredQueue = field(default=Factory(DeferredQueue)) + _waiting_for_code: list[Deferred] = field(default=Factory(list)) + + def allocate_code(self) -> None: + if self._code is not None: + raise ValueError( + "allocate_code used with a wormhole which already has a code" + ) + self._code = self._view.allocate_code(self, None) + waiters = self._waiting_for_code + self._waiting_for_code = [] + for d in waiters: + d.callback(self._code) + + def set_code(self, code: WormholeCode) -> None: + if self._code is None: + self._code = code + self._view.allocate_code(self, code) + else: + raise ValueError("set_code used with a wormhole which already has a code") + + def when_code(self) -> Deferred[WormholeCode]: + if self._code is None: + d = Deferred() + self._waiting_for_code.append(d) + return d + return succeed(self._code) + + def get_welcome(self): + return succeed("welcome") + + def send_message(self, payload: WormholeMessage) -> None: + self._payload.put(payload) + + def when_received(self) -> Deferred[WormholeMessage]: + if self._code is None: + raise ValueError( + "This implementation requires set_code or allocate_code " + "before when_received." + ) + d = self._view.wormhole_by_code(self._code, exclude=self) + + def got_wormhole(wormhole): + msg = wormhole._payload.get() + return msg + + d.addCallback(got_wormhole) + return d + + get_message = when_received + + def close(self) -> None: + pass + + # 0.9.2 compatibility + def get_code(self) -> Deferred[WormholeCode]: + if self._code is None: + self.allocate_code() + return self.when_code() + + get = when_received + + +def memory_server() -> tuple[MemoryWormholeServer, TestingHelper]: + """ + Create a paired in-memory wormhole server and testing helper. + """ + server = MemoryWormholeServer() + return server, TestingHelper(server) diff --git a/src/allmydata/test/cli_node_api.py b/src/allmydata/test/cli_node_api.py index 410796be2..c324d5565 100644 --- a/src/allmydata/test/cli_node_api.py +++ b/src/allmydata/test/cli_node_api.py @@ -134,7 +134,7 @@ class CLINodeAPI(object): @property def twistd_pid_file(self): - return self.basedir.child(u"twistd.pid") + return self.basedir.child(u"running.process") @property def node_url_file(self): diff --git a/src/allmydata/test/common.py b/src/allmydata/test/common.py index 0f2dc7c62..b652b2e48 100644 --- a/src/allmydata/test/common.py +++ b/src/allmydata/test/common.py @@ -28,6 +28,7 @@ __all__ = [ import sys import os, random, struct +from contextlib import contextmanager import six import tempfile from tempfile import mktemp @@ -87,6 +88,7 @@ from allmydata.interfaces import ( SDMF_VERSION, MDMF_VERSION, IAddressFamily, + NoSpace, ) from allmydata.check_results import CheckResults, CheckAndRepairResults, \ DeepCheckResults, DeepCheckAndRepairResults @@ -131,14 +133,48 @@ from subprocess import ( PIPE, ) -TEST_RSA_KEY_SIZE = 522 - EMPTY_CLIENT_CONFIG = config_from_string( "/dev/null", "tub.port", "" ) +@attr.s +class FakeDisk(object): + """ + Just enough of a disk to be able to report free / used information. + """ + total = attr.ib() + used = attr.ib() + + def use(self, num_bytes): + """ + Mark some amount of available bytes as used (and no longer available). + + :param int num_bytes: The number of bytes to use. + + :raise NoSpace: If there are fewer bytes available than ``num_bytes``. 
+ + :return: ``None`` + """ + if num_bytes > self.total - self.used: + raise NoSpace() + self.used += num_bytes + + @property + def available(self): + return self.total - self.used + + def get_disk_stats(self, whichdir, reserved_space): + avail = self.available + return { + 'total': self.total, + 'free_for_root': avail, + 'free_for_nonroot': avail, + 'used': self.used, + 'avail': avail - reserved_space, + } + @attr.s class MemoryIntroducerClient(object): @@ -267,8 +303,12 @@ class UseNode(object): node_config = attr.ib(default=attr.Factory(dict)) config = attr.ib(default=None) + reactor = attr.ib(default=None) def setUp(self): + self.assigner = SameProcessStreamEndpointAssigner() + self.assigner.setUp() + def format_config_items(config): return "\n".join( " = ".join((key, value)) @@ -292,6 +332,23 @@ class UseNode(object): "default", self.introducer_furl, ) + + node_config = self.node_config.copy() + if "tub.port" not in node_config: + if "tub.location" in node_config: + raise ValueError( + "UseNode fixture does not support specifying tub.location " + "without tub.port" + ) + + # Don't use the normal port auto-assignment logic. It produces + # collisions and makes tests fail spuriously. + tub_location, tub_endpoint = self.assigner.assign(self.reactor) + node_config.update({ + "tub.port": tub_endpoint, + "tub.location": tub_location, + }) + self.config = config_from_string( self.basedir.asTextMode().path, "tub.port", @@ -304,7 +361,7 @@ storage.plugins = {storage_plugin} {plugin_config_section} """.format( storage_plugin=self.storage_plugin, - node_config=format_config_items(self.node_config), + node_config=format_config_items(node_config), plugin_config_section=plugin_config_section, ) ) @@ -316,7 +373,7 @@ storage.plugins = {storage_plugin} ) def cleanUp(self): - pass + self.assigner.tearDown() def getDetails(self): @@ -1068,7 +1125,7 @@ def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False): def _corrupt_mutable_share_data(data, debug=False): prefix = data[:32] - assert prefix == MutableShareFile.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC) + assert MutableShareFile.is_valid_header(prefix), "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC) data_offset = MutableShareFile.DATA_OFFSET sharetype = data[data_offset:data_offset+1] assert sharetype == b"\x00", "non-SDMF mutable shares not supported" @@ -1213,6 +1270,29 @@ class ConstantAddresses(object): raise Exception("{!r} has no client endpoint.") return self._handler +@contextmanager +def disable_modules(*names): + """ + A context manager which makes modules appear to be missing while it is + active. + + :param *names: The names of the modules to disappear. Only top-level + modules are supported (that is, "." is not allowed in any names). + This is an implementation shortcoming which could be lifted if + desired. + """ + if any("." in name for name in names): + raise ValueError("Names containing '.' 
are not supported.") + missing = object() + modules = list(sys.modules.get(n, missing) for n in names) + for n in names: + sys.modules[n] = None + yield + for n, original in zip(names, modules): + if original is missing: + del sys.modules[n] + else: + sys.modules[n] = original class _TestCaseMixin(object): """ diff --git a/src/allmydata/test/common_storage.py b/src/allmydata/test/common_storage.py new file mode 100644 index 000000000..7adcafa43 --- /dev/null +++ b/src/allmydata/test/common_storage.py @@ -0,0 +1,60 @@ + +def upload_immutable(storage_server, storage_index, renew_secret, cancel_secret, shares): + """ + Synchronously upload some immutable shares to a ``StorageServer``. + + :param allmydata.storage.server.StorageServer storage_server: The storage + server object to use to perform the upload. + + :param bytes storage_index: The storage index for the immutable shares. + + :param bytes renew_secret: The renew secret for the implicitly created lease. + :param bytes cancel_secret: The cancel secret for the implicitly created lease. + + :param dict[int, bytes] shares: A mapping from share numbers to share data + to upload. The data for all shares must be of the same length. + + :return: ``None`` + """ + already, writers = storage_server.allocate_buckets( + storage_index, + renew_secret, + cancel_secret, + shares.keys(), + len(next(iter(shares.values()))), + ) + for shnum, writer in writers.items(): + writer.write(0, shares[shnum]) + writer.close() + + +def upload_mutable(storage_server, storage_index, secrets, shares): + """ + Synchronously upload some mutable shares to a ``StorageServer``. + + :param allmydata.storage.server.StorageServer storage_server: The storage + server object to use to perform the upload. + + :param bytes storage_index: The storage index for the immutable shares. + + :param secrets: A three-tuple of a write enabler, renew secret, and cancel + secret. + + :param dict[int, bytes] shares: A mapping from share numbers to share data + to upload. + + :return: ``None`` + """ + test_and_write_vectors = { + sharenum: ([], [(0, data)], None) + for sharenum, data + in shares.items() + } + read_vector = [] + + storage_server.slot_testv_and_readv_and_writev( + storage_index, + secrets, + test_and_write_vectors, + read_vector, + ) diff --git a/src/allmydata/test/common_system.py b/src/allmydata/test/common_system.py index 9d14c8642..01966824a 100644 --- a/src/allmydata/test/common_system.py +++ b/src/allmydata/test/common_system.py @@ -5,22 +5,14 @@ in ``allmydata.test.test_system``. Ported to Python 3. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from future.utils import PY2 -if PY2: - # Don't import bytes since it causes issues on (so far unported) modules on Python 2. 
- from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min, str # noqa: F401 - +from typing import Optional import os from functools import partial from twisted.internet import reactor from twisted.internet import defer from twisted.internet.defer import inlineCallbacks +from twisted.internet.task import deferLater from twisted.application import service from foolscap.api import flushEventualQueue @@ -28,13 +20,18 @@ from foolscap.api import flushEventualQueue from allmydata import client from allmydata.introducer.server import create_introducer from allmydata.util import fileutil, log, pollmixin +from allmydata.util.deferredutil import async_to_deferred +from allmydata.storage import http_client +from allmydata.storage_client import ( + NativeStorageServer, + HTTPNativeStorageServer, +) from twisted.python.filepath import ( FilePath, ) from .common import ( - TEST_RSA_KEY_SIZE, SameProcessStreamEndpointAssigner, ) @@ -643,9 +640,51 @@ def _render_section_values(values): )) +@async_to_deferred +async def spin_until_cleanup_done(value=None, timeout=10): + """ + At the end of the test, spin until the reactor has no more DelayedCalls + and file descriptors (or equivalents) registered. This prevents dirty + reactor errors, while also not hard-coding a fixed amount of time, so it + can finish faster on faster computers. + + There is also a timeout: if it takes more than 10 seconds (by default) for + the remaining reactor state to clean itself up, the presumption is that it + will never get cleaned up and the spinning stops. + + Make sure to run as last thing in tearDown. + """ + def num_fds(): + if hasattr(reactor, "handles"): + # IOCP! + return len(reactor.handles) + else: + # Normal reactor; having internal readers still registered is fine, + # that's not our code. + return len( + set(reactor.getReaders()) - set(reactor._internalReaders) + ) + len(reactor.getWriters()) + + for i in range(timeout * 1000): + # There's a single DelayedCall for AsynchronousDeferredRunTest's + # timeout... + if (len(reactor.getDelayedCalls()) < 2 and num_fds() == 0): + break + await deferLater(reactor, 0.001) + return value + + class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): + # If set to True, use Foolscap for storage protocol. If set to False, HTTP + # will be used when possible. If set to None, this suggests a bug in the + # test code. + FORCE_FOOLSCAP_FOR_STORAGE : Optional[bool] = None + def setUp(self): + self._http_client_pools = [] + http_client.StorageClient.start_test_mode(self._got_new_http_connection_pool) + self.addCleanup(http_client.StorageClient.stop_test_mode) self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) @@ -653,10 +692,35 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): self.sparent = service.MultiService() self.sparent.startService() + def _got_new_http_connection_pool(self, pool): + # Register the pool for shutdown later: + self._http_client_pools.append(pool) + # Disable retries: + pool.retryAutomatically = False + # Make a much more aggressive timeout for connections, we're connecting + # locally after all... and also make sure it's lower than the delay we + # add in tearDown, to prevent dirty reactor issues. 
+ getConnection = pool.getConnection + + def getConnectionWithTimeout(*args, **kwargs): + d = getConnection(*args, **kwargs) + d.addTimeout(1, reactor) + return d + + pool.getConnection = getConnectionWithTimeout + + def close_idle_http_connections(self): + """Close all HTTP client connections that are just hanging around.""" + return defer.gatherResults( + [pool.closeCachedConnections() for pool in self._http_client_pools] + ) + def tearDown(self): log.msg("shutting down SystemTest services") d = self.sparent.stopService() d.addBoth(flush_but_dont_ignore) + d.addBoth(lambda x: self.close_idle_http_connections().addCallback(lambda _: x)) + d.addBoth(spin_until_cleanup_done) return d def getdir(self, subdir): @@ -672,11 +736,14 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): """ iv_dir = self.getdir("introducer") if not os.path.isdir(iv_dir): - _, port_endpoint = self.port_assigner.assign(reactor) + _, web_port_endpoint = self.port_assigner.assign(reactor) + main_location_hint, main_port_endpoint = self.port_assigner.assign(reactor) introducer_config = ( u"[node]\n" u"nickname = introducer \N{BLACK SMILING FACE}\n" + - u"web.port = {}\n".format(port_endpoint) + u"web.port = {}\n".format(web_port_endpoint) + + u"tub.port = {}\n".format(main_port_endpoint) + + u"tub.location = {}\n".format(main_location_hint) ).encode("utf-8") fileutil.make_dirs(iv_dir) @@ -712,28 +779,37 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): :return: A ``Deferred`` that fires when the nodes have connected to each other. """ + self.assertIn( + self.FORCE_FOOLSCAP_FOR_STORAGE, (True, False), + "You forgot to set FORCE_FOOLSCAP_FOR_STORAGE on {}".format(self.__class__) + ) self.numclients = NUMCLIENTS self.introducer = yield self._create_introducer() self.add_service(self.introducer) self.introweb_url = self._get_introducer_web() - yield self._set_up_client_nodes() + yield self._set_up_client_nodes(self.FORCE_FOOLSCAP_FOR_STORAGE) + native_server = next(iter(self.clients[0].storage_broker.get_known_servers())) + if self.FORCE_FOOLSCAP_FOR_STORAGE: + expected_storage_server_class = NativeStorageServer + else: + expected_storage_server_class = HTTPNativeStorageServer + self.assertIsInstance(native_server, expected_storage_server_class) @inlineCallbacks - def _set_up_client_nodes(self): + def _set_up_client_nodes(self, force_foolscap): q = self.introducer self.introducer_furl = q.introducer_url self.clients = [] basedirs = [] for i in range(self.numclients): - basedirs.append((yield self._set_up_client_node(i))) + basedirs.append((yield self._set_up_client_node(i, force_foolscap))) # start clients[0], wait for it's tub to be ready (at which point it # will have registered the helper furl). 
c = yield client.create_client(basedirs[0]) c.setServiceParent(self.sparent) self.clients.append(c) - c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE) with open(os.path.join(basedirs[0],"private","helper.furl"), "r") as f: helper_furl = f.read() @@ -751,7 +827,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): c = yield client.create_client(basedirs[i]) c.setServiceParent(self.sparent) self.clients.append(c) - c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE) log.msg("STARTING") yield self.wait_for_connections() log.msg("CONNECTED") @@ -761,16 +836,18 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): # and the helper-using webport self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL() - def _generate_config(self, which, basedir): + def _generate_config(self, which, basedir, force_foolscap=False): config = {} - except1 = set(range(self.numclients)) - {1} + allclients = set(range(self.numclients)) + except1 = allclients - {1} feature_matrix = { ("client", "nickname"): except1, - # client 1 has to auto-assign an address. - ("node", "tub.port"): except1, - ("node", "tub.location"): except1, + # Auto-assigning addresses is extremely failure prone and not + # amenable to automated testing in _this_ manner. + ("node", "tub.port"): allclients, + ("node", "tub.location"): allclients, # client 0 runs a webserver and a helper # client 3 runs a webserver but no helper @@ -789,6 +866,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): sethelper = partial(setconf, config, which, "helper") setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,)) + setconf(config, which, "storage", "force_foolscap", str(force_foolscap)) tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor) setnode("tub.port", tub_port_endpoint) @@ -806,17 +884,16 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): " furl: %s\n") % self.introducer_furl iyaml_fn = os.path.join(basedir, "private", "introducers.yaml") fileutil.write(iyaml_fn, iyaml) - return _render_config(config) - def _set_up_client_node(self, which): + def _set_up_client_node(self, which, force_foolscap): basedir = self.getdir("client%d" % (which,)) fileutil.make_dirs(os.path.join(basedir, "private")) if len(SYSTEM_TEST_CERTS) > (which + 1): f = open(os.path.join(basedir, "private", "node.pem"), "w") f.write(SYSTEM_TEST_CERTS[which + 1]) f.close() - config = self._generate_config(which, basedir) + config = self._generate_config(which, basedir, force_foolscap) fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config) return basedir @@ -833,7 +910,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): def _stopped(res): new_c = yield client.create_client(self.getdir("client%d" % num)) self.clients[num] = new_c - new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE) new_c.setServiceParent(self.sparent) d.addCallback(_stopped) d.addCallback(lambda res: self.wait_for_connections()) @@ -852,7 +928,13 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): # connection-lost code basedir = FilePath(self.getdir("client%d" % client_num)) basedir.makedirs() - config = "[client]\n" + config = ( + "[node]\n" + "tub.location = {}\n" + "tub.port = {}\n" + "[client]\n" + ).format(*self.port_assigner.assign(reactor)) + if helper_furl: config += "helper.furl = %s\n" % helper_furl basedir.child("tahoe.cfg").setContent(config.encode("utf-8")) @@ -866,7 +948,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): c = yield 
client.create_client(basedir.path) self.clients.append(c) - c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE) self.numclients += 1 if add_to_sparent: c.setServiceParent(self.sparent) diff --git a/src/allmydata/test/common_util.py b/src/allmydata/test/common_util.py index d2d20916d..b6d352ab1 100644 --- a/src/allmydata/test/common_util.py +++ b/src/allmydata/test/common_util.py @@ -69,6 +69,9 @@ def run_cli_native(verb, *args, **kwargs): Most code should prefer ``run_cli_unicode`` which deals with all the necessary encoding considerations. + :param runner.Options options: The options instance to use to parse the + given arguments. + :param native_str verb: The command to run. For example, ``"create-node"``. @@ -88,6 +91,7 @@ def run_cli_native(verb, *args, **kwargs): matching native behavior. If True, stdout/stderr are returned as bytes. """ + options = kwargs.pop("options", runner.Options()) nodeargs = kwargs.pop("nodeargs", []) encoding = kwargs.pop("encoding", None) or getattr(sys.stdout, "encoding") or "utf-8" return_bytes = kwargs.pop("return_bytes", False) @@ -134,13 +138,14 @@ def run_cli_native(verb, *args, **kwargs): d.addCallback( partial( runner.parse_or_exit, - runner.Options(), + options, ), stdout=stdout, stderr=stderr, ) d.addCallback( runner.dispatch, + reactor, stdin=stdin, stdout=stdout, stderr=stderr, diff --git a/src/allmydata/test/data/lease_checker.history.txt b/src/allmydata/test/data/lease_checker.history.txt new file mode 100644 index 000000000..0c27a5ad0 --- /dev/null +++ b/src/allmydata/test/data/lease_checker.history.txt @@ -0,0 +1,501 @@ +(dp0 +I363 +(dp1 +Vconfigured-expiration-mode +p2 +(S'age' +p3 +NN(S'immutable' +p4 +S'mutable' +p5 +tp6 +tp7 +sVexpiration-enabled +p8 +I00 +sVleases-per-share-histogram +p9 +(dp10 +I1 +I39774 +ssVlease-age-histogram +p11 +(lp12 +(I0 +I86400 +I3125 +tp13 +a(I345600 +I432000 +I4175 +tp14 +a(I950400 +I1036800 +I141 +tp15 +a(I1036800 +I1123200 +I345 +tp16 +a(I1123200 +I1209600 +I81 +tp17 +a(I1296000 +I1382400 +I1832 +tp18 +a(I1555200 +I1641600 +I390 +tp19 +a(I1728000 +I1814400 +I12 +tp20 +a(I2073600 +I2160000 +I84 +tp21 +a(I2160000 +I2246400 +I228 +tp22 +a(I2246400 +I2332800 +I75 +tp23 +a(I2592000 +I2678400 +I644 +tp24 +a(I2678400 +I2764800 +I273 +tp25 +a(I2764800 +I2851200 +I94 +tp26 +a(I2851200 +I2937600 +I97 +tp27 +a(I3196800 +I3283200 +I143 +tp28 +a(I3283200 +I3369600 +I48 +tp29 +a(I4147200 +I4233600 +I374 +tp30 +a(I4320000 +I4406400 +I534 +tp31 +a(I5270400 +I5356800 +I1005 +tp32 +a(I6739200 +I6825600 +I8704 +tp33 +a(I6825600 +I6912000 +I3986 +tp34 +a(I6912000 +I6998400 +I7592 +tp35 +a(I6998400 +I7084800 +I2607 +tp36 +a(I7689600 +I7776000 +I35 +tp37 +a(I8035200 +I8121600 +I33 +tp38 +a(I8294400 +I8380800 +I54 +tp39 +a(I8640000 +I8726400 +I45 +tp40 +a(I8726400 +I8812800 +I27 +tp41 +a(I8812800 +I8899200 +I12 +tp42 +a(I9763200 +I9849600 +I77 +tp43 +a(I9849600 +I9936000 +I91 +tp44 +a(I9936000 +I10022400 +I1210 +tp45 +a(I10022400 +I10108800 +I45 +tp46 +a(I10108800 +I10195200 +I186 +tp47 +a(I10368000 +I10454400 +I113 +tp48 +a(I10972800 +I11059200 +I21 +tp49 +a(I11232000 +I11318400 +I5 +tp50 +a(I11318400 +I11404800 +I19 +tp51 +a(I11404800 +I11491200 +I238 +tp52 +a(I11491200 +I11577600 +I159 +tp53 +a(I11750400 +I11836800 +I1 +tp54 +a(I11836800 +I11923200 +I32 +tp55 +a(I11923200 +I12009600 +I192 +tp56 +a(I12009600 +I12096000 +I222 +tp57 +a(I12096000 +I12182400 +I18 +tp58 +a(I12182400 +I12268800 +I224 +tp59 +a(I12268800 +I12355200 +I9 +tp60 +a(I12355200 +I12441600 +I9 +tp61 +a(I12441600 +I12528000 +I10 +tp62 +a(I12528000 +I12614400 +I6 
+tp63 +a(I12614400 +I12700800 +I6 +tp64 +a(I12700800 +I12787200 +I18 +tp65 +a(I12787200 +I12873600 +I6 +tp66 +a(I12873600 +I12960000 +I62 +tp67 +asVcycle-start-finish-times +p68 +(F1634446505.241972 +F1634446666.055401 +tp69 +sVspace-recovered +p70 +(dp71 +Vexamined-buckets-immutable +p72 +I17896 +sVconfigured-buckets-mutable +p73 +I0 +sVexamined-shares-mutable +p74 +I2473 +sVoriginal-shares-mutable +p75 +I1185 +sVconfigured-buckets-immutable +p76 +I0 +sVoriginal-shares-immutable +p77 +I27457 +sVoriginal-diskbytes-immutable +p78 +I2810982400 +sVexamined-shares-immutable +p79 +I37301 +sVoriginal-buckets +p80 +I14047 +sVactual-shares-immutable +p81 +I0 +sVconfigured-shares +p82 +I0 +sVoriginal-buckets-mutable +p83 +I691 +sVactual-diskbytes +p84 +I4096 +sVactual-shares-mutable +p85 +I0 +sVconfigured-buckets +p86 +I1 +sVexamined-buckets-unknown +p87 +I14 +sVactual-sharebytes +p88 +I0 +sVoriginal-shares +p89 +I28642 +sVactual-buckets-immutable +p90 +I0 +sVoriginal-sharebytes +p91 +I2695552941 +sVexamined-sharebytes-immutable +p92 +I2754798505 +sVactual-shares +p93 +I0 +sVactual-sharebytes-immutable +p94 +I0 +sVoriginal-diskbytes +p95 +I2818981888 +sVconfigured-diskbytes-mutable +p96 +I0 +sVconfigured-sharebytes-immutable +p97 +I0 +sVconfigured-shares-mutable +p98 +I0 +sVactual-diskbytes-immutable +p99 +I0 +sVconfigured-diskbytes-immutable +p100 +I0 +sVoriginal-diskbytes-mutable +p101 +I7995392 +sVactual-sharebytes-mutable +p102 +I0 +sVconfigured-sharebytes +p103 +I0 +sVexamined-shares +p104 +I39774 +sVactual-diskbytes-mutable +p105 +I0 +sVactual-buckets +p106 +I1 +sVoriginal-buckets-immutable +p107 +I13355 +sVconfigured-sharebytes-mutable +p108 +I0 +sVexamined-sharebytes +p109 +I2763646972 +sVoriginal-sharebytes-immutable +p110 +I2692076909 +sVoriginal-sharebytes-mutable +p111 +I3476032 +sVactual-buckets-mutable +p112 +I0 +sVexamined-buckets-mutable +p113 +I1286 +sVconfigured-shares-immutable +p114 +I0 +sVexamined-diskbytes +p115 +I2854801408 +sVexamined-diskbytes-mutable +p116 +I12161024 +sVexamined-sharebytes-mutable +p117 +I8848467 +sVexamined-buckets +p118 +I19197 +sVconfigured-diskbytes +p119 +I4096 +sVexamined-diskbytes-immutable +p120 +I2842640384 +ssVcorrupt-shares +p121 +(lp122 +(V2dn6xnlnsqwtnapwxfdivpm3s4 +p123 +I3 +tp124 +a(g123 +I0 +tp125 +a(V2rrzthwsrrxolevmwdvbdy3rqi +p126 +I3 +tp127 +a(g126 +I0 +tp128 +a(V2skfngcto6h7eqmn4uo7ntk3ne +p129 +I3 +tp130 +a(g129 +I0 +tp131 +a(V32d5swqpqx2mwix7xmqzvhdwje +p132 +I3 +tp133 +a(g132 +I0 +tp134 +a(V5mmayp66yflmpon3o6unsnbaca +p135 +I3 +tp136 +a(g135 +I0 +tp137 +a(V6ixhpvbtre7fnrl6pehlrlflc4 +p138 +I3 +tp139 +a(g138 +I0 +tp140 +a(Vewzhvswjsz4vp2bqkb6mi3bz2u +p141 +I3 +tp142 +a(g141 +I0 +tp143 +a(Vfu7pazf6ogavkqj6z4q5qqex3u +p144 +I3 +tp145 +a(g144 +I0 +tp146 +a(Vhbyjtqvpcimwxiyqbcbbdn2i4a +p147 +I3 +tp148 +a(g147 +I0 +tp149 +a(Vpmcjbdkbjdl26k3e6yja77femq +p150 +I3 +tp151 +a(g150 +I0 +tp152 +a(Vr6swof4v2uttbiiqwj5pi32cm4 +p153 +I3 +tp154 +a(g153 +I0 +tp155 +a(Vt45v5akoktf53evc2fi6gwnv6y +p156 +I3 +tp157 +a(g156 +I0 +tp158 +a(Vy6zb4faar3rdvn3e6pfg4wlotm +p159 +I3 +tp160 +a(g159 +I0 +tp161 +a(Vz3yghutvqoqbchjao4lndnrh3a +p162 +I3 +tp163 +a(g162 +I0 +tp164 +ass. 
\ No newline at end of file diff --git a/src/allmydata/test/data/lease_checker.state.txt b/src/allmydata/test/data/lease_checker.state.txt new file mode 100644 index 000000000..b32554434 --- /dev/null +++ b/src/allmydata/test/data/lease_checker.state.txt @@ -0,0 +1,545 @@ +(dp1 +S'last-complete-prefix' +p2 +NsS'version' +p3 +I1 +sS'current-cycle-start-time' +p4 +F1635003106.611748 +sS'last-cycle-finished' +p5 +I312 +sS'cycle-to-date' +p6 +(dp7 +Vleases-per-share-histogram +p8 +(dp9 +I1 +I36793 +sI2 +I1 +ssVspace-recovered +p10 +(dp11 +Vexamined-buckets-immutable +p12 +I17183 +sVconfigured-buckets-mutable +p13 +I0 +sVexamined-shares-mutable +p14 +I1796 +sVoriginal-shares-mutable +p15 +I1563 +sVconfigured-buckets-immutable +p16 +I0 +sVoriginal-shares-immutable +p17 +I27926 +sVoriginal-diskbytes-immutable +p18 +I431149056 +sVexamined-shares-immutable +p19 +I34998 +sVoriginal-buckets +p20 +I14661 +sVactual-shares-immutable +p21 +I0 +sVconfigured-shares +p22 +I0 +sVoriginal-buckets-immutable +p23 +I13761 +sVactual-diskbytes +p24 +I4096 +sVactual-shares-mutable +p25 +I0 +sVconfigured-buckets +p26 +I1 +sVexamined-buckets-unknown +p27 +I14 +sVactual-sharebytes +p28 +I0 +sVoriginal-shares +p29 +I29489 +sVoriginal-sharebytes +p30 +I312664812 +sVexamined-sharebytes-immutable +p31 +I383801602 +sVactual-shares +p32 +I0 +sVactual-sharebytes-immutable +p33 +I0 +sVoriginal-diskbytes +p34 +I441643008 +sVconfigured-diskbytes-mutable +p35 +I0 +sVconfigured-sharebytes-immutable +p36 +I0 +sVconfigured-shares-mutable +p37 +I0 +sVactual-diskbytes-immutable +p38 +I0 +sVconfigured-diskbytes-immutable +p39 +I0 +sVoriginal-diskbytes-mutable +p40 +I10489856 +sVactual-sharebytes-mutable +p41 +I0 +sVconfigured-sharebytes +p42 +I0 +sVexamined-shares +p43 +I36794 +sVactual-diskbytes-mutable +p44 +I0 +sVactual-buckets +p45 +I1 +sVoriginal-buckets-mutable +p46 +I899 +sVconfigured-sharebytes-mutable +p47 +I0 +sVexamined-sharebytes +p48 +I390369660 +sVoriginal-sharebytes-immutable +p49 +I308125753 +sVoriginal-sharebytes-mutable +p50 +I4539059 +sVactual-buckets-mutable +p51 +I0 +sVexamined-diskbytes-mutable +p52 +I9154560 +sVexamined-buckets-mutable +p53 +I1043 +sVconfigured-shares-immutable +p54 +I0 +sVexamined-diskbytes +p55 +I476598272 +sVactual-buckets-immutable +p56 +I0 +sVexamined-sharebytes-mutable +p57 +I6568058 +sVexamined-buckets +p58 +I18241 +sVconfigured-diskbytes +p59 +I4096 +sVexamined-diskbytes-immutable +p60 +I467443712 +ssVcorrupt-shares +p61 +(lp62 +(V2dn6xnlnsqwtnapwxfdivpm3s4 +p63 +I4 +tp64 +a(g63 +I1 +tp65 +a(V2rrzthwsrrxolevmwdvbdy3rqi +p66 +I4 +tp67 +a(g66 +I1 +tp68 +a(V2skfngcto6h7eqmn4uo7ntk3ne +p69 +I4 +tp70 +a(g69 +I1 +tp71 +a(V32d5swqpqx2mwix7xmqzvhdwje +p72 +I4 +tp73 +a(g72 +I1 +tp74 +a(V5mmayp66yflmpon3o6unsnbaca +p75 +I4 +tp76 +a(g75 +I1 +tp77 +a(V6ixhpvbtre7fnrl6pehlrlflc4 +p78 +I4 +tp79 +a(g78 +I1 +tp80 +a(Vewzhvswjsz4vp2bqkb6mi3bz2u +p81 +I4 +tp82 +a(g81 +I1 +tp83 +a(Vfu7pazf6ogavkqj6z4q5qqex3u +p84 +I4 +tp85 +a(g84 +I1 +tp86 +a(Vhbyjtqvpcimwxiyqbcbbdn2i4a +p87 +I4 +tp88 +a(g87 +I1 +tp89 +a(Vpmcjbdkbjdl26k3e6yja77femq +p90 +I4 +tp91 +a(g90 +I1 +tp92 +a(Vr6swof4v2uttbiiqwj5pi32cm4 +p93 +I4 +tp94 +a(g93 +I1 +tp95 +a(Vt45v5akoktf53evc2fi6gwnv6y +p96 +I4 +tp97 +a(g96 +I1 +tp98 +a(Vy6zb4faar3rdvn3e6pfg4wlotm +p99 +I4 +tp100 +a(g99 +I1 +tp101 +a(Vz3yghutvqoqbchjao4lndnrh3a +p102 +I4 +tp103 +a(g102 +I1 +tp104 +asVlease-age-histogram +p105 +(dp106 +(I45619200 +I45705600 +tp107 +I4 +s(I12441600 +I12528000 +tp108 +I78 +s(I11923200 +I12009600 +tp109 +I89 +s(I33436800 +I33523200 +tp110 +I7 +s(I37411200 
+I37497600 +tp111 +I4 +s(I38361600 +I38448000 +tp112 +I5 +s(I4665600 +I4752000 +tp113 +I256 +s(I11491200 +I11577600 +tp114 +I20 +s(I10713600 +I10800000 +tp115 +I183 +s(I42076800 +I42163200 +tp116 +I4 +s(I47865600 +I47952000 +tp117 +I7 +s(I3110400 +I3196800 +tp118 +I328 +s(I5788800 +I5875200 +tp119 +I954 +s(I9331200 +I9417600 +tp120 +I12 +s(I7430400 +I7516800 +tp121 +I7228 +s(I1555200 +I1641600 +tp122 +I492 +s(I37929600 +I38016000 +tp123 +I3 +s(I38880000 +I38966400 +tp124 +I3 +s(I12528000 +I12614400 +tp125 +I193 +s(I10454400 +I10540800 +tp126 +I1239 +s(I11750400 +I11836800 +tp127 +I7 +s(I950400 +I1036800 +tp128 +I4435 +s(I44409600 +I44496000 +tp129 +I13 +s(I12787200 +I12873600 +tp130 +I218 +s(I10368000 +I10454400 +tp131 +I117 +s(I3283200 +I3369600 +tp132 +I86 +s(I7516800 +I7603200 +tp133 +I993 +s(I42336000 +I42422400 +tp134 +I33 +s(I46310400 +I46396800 +tp135 +I1 +s(I39052800 +I39139200 +tp136 +I51 +s(I7603200 +I7689600 +tp137 +I2004 +s(I10540800 +I10627200 +tp138 +I16 +s(I36374400 +I36460800 +tp139 +I3 +s(I3369600 +I3456000 +tp140 +I79 +s(I12700800 +I12787200 +tp141 +I25 +s(I4838400 +I4924800 +tp142 +I386 +s(I10972800 +I11059200 +tp143 +I122 +s(I8812800 +I8899200 +tp144 +I57 +s(I38966400 +I39052800 +tp145 +I61 +s(I3196800 +I3283200 +tp146 +I628 +s(I9244800 +I9331200 +tp147 +I73 +s(I30499200 +I30585600 +tp148 +I5 +s(I12009600 +I12096000 +tp149 +I329 +s(I12960000 +I13046400 +tp150 +I8 +s(I12614400 +I12700800 +tp151 +I210 +s(I3801600 +I3888000 +tp152 +I32 +s(I10627200 +I10713600 +tp153 +I43 +s(I44928000 +I45014400 +tp154 +I2 +s(I8208000 +I8294400 +tp155 +I38 +s(I8640000 +I8726400 +tp156 +I32 +s(I7344000 +I7430400 +tp157 +I12689 +s(I49075200 +I49161600 +tp158 +I19 +s(I2764800 +I2851200 +tp159 +I76 +s(I2592000 +I2678400 +tp160 +I40 +s(I2073600 +I2160000 +tp161 +I388 +s(I37497600 +I37584000 +tp162 +I11 +s(I1641600 +I1728000 +tp163 +I78 +s(I12873600 +I12960000 +tp164 +I5 +s(I1814400 +I1900800 +tp165 +I1860 +s(I40176000 +I40262400 +tp166 +I1 +s(I3715200 +I3801600 +tp167 +I104 +s(I2332800 +I2419200 +tp168 +I12 +s(I2678400 +I2764800 +tp169 +I278 +s(I12268800 +I12355200 +tp170 +I2 +s(I28771200 +I28857600 +tp171 +I6 +s(I41990400 +I42076800 +tp172 +I10 +sssS'last-complete-bucket' +p173 +NsS'current-cycle' +p174 +Ns. 
\ No newline at end of file diff --git a/src/allmydata/test/data/pycryptopp-rsa-1024-priv.txt b/src/allmydata/test/data/pycryptopp-rsa-1024-priv.txt new file mode 100644 index 000000000..6f5e67950 --- /dev/null +++ b/src/allmydata/test/data/pycryptopp-rsa-1024-priv.txt @@ -0,0 +1 @@ +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAJLEAfZueLuT4vUQ1+c8ZM9dJ/LA29CYgA5toaMklQjbVQ2Skywvw1wEkRjhMpjQAx5+lpLTE2xCtqtfkHooMRNnquOxoh0o1Xya60jUHze7VB5QMV7BMKeUTff1hQqpIgw/GLvJRtar53cVY+SYf4SXx2/slDbVr8BI3DPwdeNtAgERAoGABzHD3GTJrteQJRxu+cQ3I0NPwx2IQ/Nlplq1GZDaIQ/FbJY+bhZrdXOswnl4cOcPNjNhu+c1qHGznv0ntayjCGgJ9dDySGqknDau+ezZcBO1JrIpPOABS7MVMst79mn47vB2+t8w5krrBYahAVp/L5kY8k+Pr9AU+L9mbevFW9MCQQDA+bAeMRNBfGc4gvoVV8ecovE1KRksFDlkaDVEOc76zNW6JZazHhQF/zIoMkV81rrg5UBntw3WR3R8A3l9osgDAkEAwrLQICJ3zjsJBt0xEkCBv9tK6IvSIc7MUQIc4J2Y1hiSjqsnTRACRy3UMsODfx/Lg7ITlDbABCLfv3v4D39jzwJBAKpFuYQNLxuqALlkgk8RN6hTiYlCYYE/BXa2TR4U4848RBy3wTSiEarwO1Ck0+afWZlCwFuDZo/kshMSH+dTZS8CQQC3PuIAIHDCGXHoV7W200zwzmSeoba2aEfTxcDTZyZvJi+VVcqi4eQGwbioP4rR/86aEQNeUaWpijv/g7xK0j/RAkBbt2U9bFFcja10KIpgw2bBxDU/c67h4+38lkrBUnM9XVBZxjbtQbnkkeAfOgQDiq3oBDBrHF3/Q8XM0CzZJBWS \ No newline at end of file diff --git a/src/allmydata/test/data/pycryptopp-rsa-32768-priv.txt b/src/allmydata/test/data/pycryptopp-rsa-32768-priv.txt new file mode 100644 index 000000000..d949f3f60 --- /dev/null +++ b/src/allmydata/test/data/pycryptopp-rsa-32768-priv.txt @@ -0,0 +1 @@ +MIJIQQIBADANBgkqhkiG9w0BAQEFAASCSCswgkgnAgEAAoIQAQC3x9r2dfYoTp7oIMsPdOhyNK5CB3TOtiaxhf3EkGAIaLWTXUVbxvOkiSu3Tca9VqFVnN7EkbT790uDjh4rviGeZF8oplVN+FDxKfcg5tXWv4ec9LnOUUAVRUnrUQA2azkOT+ozXQwZnJwUYr210VoV8D0MkrvOzNgGpb8aErDhW8SwrJcoYkObIE7n3C3zEMaEIyA1OFWSJDiXNGnBDvO54t1/y+/o4IuzLWWG7TPx8hnV+jcHRoxJTX2MZusJ7kugvxhgB0+avwXFTQr6ogvPNcUXak0+aLInLRtkYJ+0DYqo1hLAh8EBY/cLrhZM5LGGC4BAwGgUwsx3KKeOeduNnob3s/1rZpvZGwbGtfiWYQwDB8q68j3Ypf2Qvn7hPwicdOr0Dwe4TXJQ4yRHPeQaToOBUjtTJnrHsKDZET6i+jQ9e07Ct+yYrUwZjiaSXJYU/gCyPCui7L37NasXBJ00f1Ogm3gt4uxl3abO8mO1nKSWM+HbFBEyyO0apT+sSwYj6IL7cyCSJtWYMD4APdW5rXSArhyiaHV+xNbVUXAdBrZSNuwet925hTOf4IQD9uqfzeV3HIoiUCxn5GKYPZy01Kft+DExuDbJjMmES2GhfPWRIFB5MN0UdjlagDHLzFraQUcLTDKlxL0iZ+uV4Itv5dQyaf93Szu2LD1jnkvZOV5GN1RxTmZCH1FIPYCNwS6mIRG/4aPWA0HCZX8HzSMOBshAS6wECaoLWxv8D3K4Tm1rp/EgP7NZRxTj2ToOostJtjzTrVb3f3+zaT5svxD1Exw8tA1fZNRThIDKZXVSSLDYaiRDAUg7xEMD2eDCvNQasjAwX5Tnw7R4M/CZoZhgYVwIE+vHQTh8H+M/J8CNLxPT4N3fuXCqT8YoJVUOmKHe0kE5Rtd87X2BQY5SSx6LFMRRSVdBBpWB6cwLo8egehYAScEDQh0ht/ssaraWZ2LGt5hZL0I5V58iS/6C4IOu+1ry75g6mecWoHD0fBQELB3Q3Qi6c6Hik/jgTLQHb5UMqKj/MDSdTWuxwH2dYU5H4EGAkbfufBoxw9hIpdeS7/aDulvRKtFVPfi/pxmrd1lxQCBA4ionRe4IOY0E9i419TOgMtGgZxNlEXtp445MbeIlurxIDIX8N+RGHWljGR/9K6sjbgtGKyKLUxg51DZeuDKQGdyKXtIIkZ+Od9HN+3Mv0Ch5B9htIRV9hE6oLWLT+grqJCFAOD3olGgrRXByDsd8YouahYfjqb4KNCOyFPS3j5MdUpq+fiLrG3O98/L/xtmXxw+ekl95EGAnlwiCwULsjzVjHJDzSc68cldMnzNqLwhwWXpc0iswCWCQVFce/d1KlWqrtwq2ThH2pX3BJ5Pnu+KMISNNC/tagLe9vjmrh6ZhEks7hefn0srytJdivGDFqMs/ISmcld0U/0ZqE05b7BpErpfVrG9kb5QxWBTpaEb2O0pRsaYRcllFuNF6Nl/jPDBnn4BMYnOFnn9OKGPEDUeV/6CYP9x+Wi96M5Ni6vtv+zw9Xg8drslS5DJazXQFbJ0aqW3EgalUJVV0NgykB6Hr4pxTzrwo0+R/ro32DEj5OfjjU7TB4fYie0eax8tpdvzcWJRZ/c5b/Dg1yK+hbiMg9aTctHAsYJkOvMpxvull20IuV2sErWZ7KZhId19AFOnEQ6ILlHRwUf35AyEVmUL5BqLl137EeEVShEmage4+E/N6PdKzJdJGl1AQGyb7NTD86m0Jj2+8qu6zsBgyUfiJqZ17fixKV6l9HGJKSmY9If2XrX/IhNZ5dvqSmODJ1ZRGC5gjJcxcdHp2Q1179SlNmXiR/7DMcprL/+iVhRyxzM2GEJ78q9jS6j/Z+0vLzdNOPo1KxD191ogYjl5ck9gnHAkbaiANaK4rrfMytDkNm0JRua4p0mVyVHWZWwatoMhJxVl3+9x37OkF24ICTJZ4LSKDLJxi9WCQbhgACIA1mjcW0P+4AszpbuSXOQkPtT+MQ0IxHMzX261yHAIPbGsbSzoTy+PWJywFdMDy5afXDTNpmMfpzWkw2fhBQasNoGHl2CwFftJdr4WWxuN6mSwhNVHJTw1xe
4A5fa6bjip5kmrLQK85YF4Ron0OIOofjcCzvjKCkNkGVKBhRiqBoqV6Pzz1XauVHFhFgZZNWXI+le+Fg9SJojeDtFQp5w6dZKBJMxV2uNPqV0U4VOtvAas2+Ul4zIJDB/FJyDX8POrsR+VkW7via64xM1hQlOZ5ispEOUvmO/NWkAsJM0n3S7qgud6NaFqOofQZcbh5r1z2uIrXwUIb85m2t/sPJBI1J/Dql4dmzgfn/q6Siqi8FeDoma/lQBZWyEeGz+/ckHdw/BGPx5FZlc8xLegNrQj4sVkUZXVAjNoUguA5HT9GcAmE5FeOHdHtD0bdTaNFkQbKdi3yUlGA1GZeyPThwfBaizgX3i6oOtGguX3HQMQtExip5xR2vsiYJsbWXuzlKEws8GwXoiJo8xEh+TPavxxtZ7dDdnJY1mUhKTVGLBCqCrJ+uhWdWuHKvC9x++V5NO6WQrUiG/o8oOwkpWyH7GC/VtulpxkoJlxAej3JxlHn91cN4PstDo4goOhQBi9k2A5rsmvjGG75BOKlqvhaQ6BPOa+9F5D5H0RhT0hw43TZmJri+0Ba2WT3FigcHHYGtx4UJfyqfg7d+WXvpIynC7i3SIN3N7atg3EsWwPuzDKE6ycjWTD6ToKmYLMnDgl4PzOEBFstG12OdcuQwhk2Dy5uEdxqGfViy3fV+Muev0yAkE/pRwutgQjQdw0OPXyGoqchYx33/cHq1fDWmkXZab8wuVThcx3He30UI4rr3MMff0gxdnJt3e6YcHHF0R8fGwkVC03zWXI2hfqHq+rNQkBnIbbRnepKvJylmcHn8KVJ13Nm2iHRTw7B8r6fE6LsmUJndh/M2Poa1AtxfGBniMIfqtV0RuT7UR1nDI0C8Lnx7E2KTw1MXCLh4xzGr5wZ+4T5FTeUnzd6yc7EEduLxktqh7RpmnBBPRNIufI9ztPTmRPXgF7r9PxI8MI09Sr2HQq2ZmEs6G0w8l8WMiABvlG/YQd+UHGn29acrzSYp6AfggjuUV7PrCC4flKk5IGBNdUtUqFxBRUuvn0ln7HayAAYLJuVMNv9daBwqMpp3Faor/0K+jC0FhIan3R6wBpKSuJo/6jZJoSlSCLGCkFqM9ks3sgD5cDvxahV7HNOv7AisDws2LsVATHbF0HFeoEA7lp6NzjK5dgqd+9rA95U0c7w31E1E9GbmzLADC/0eSDKEkdKGIJ4mP1erpBOc+cdJ2tVP5e6cZ7KNhzjYf19tORINCTrPAp9/aLXnoHgtLp3ozkFS/dGowLZ6Q5XInPBchgiI4TVHDDxGpwMAZp3G3yM1QDptd3pxRSv4m97QIOa7ma9l3TCK8RA/bs/akYoZnxM92GvG/3FQdws1y3Lz2NjoikVSaX0TS1t16TupL3PQioaeRJLnTZu0WGR20WLL6kEBz6cHJC3ZN9Zilnoje8lEm/7/WYOCt490+w4KS24aJcgDPzV7Z1npXy19p3ywEY0AJND8uurWeTEHIBJNxMPU2OMGd0bGa2S0yr/dfbIz3FmD06noX7/XKMjQ+gW8EBXAA7s8TA2RE0HbD8IGKlg3CCIaYsS4BbvK0B71qHhe/yM8qnUo5+vv1UpbioYVBI77UfiqqUDUAIIg+apIKJjU352GqXiEovXGR6Jeag+ufzPkPq9BqvyIfW0+3r2/wp4nIu7Z9XM6iU1Lj1j/wM1goktBnDfY6hbjHA0acQFCgUrzeGqyzYSe9kufDTSw7ePbx2rLG+fXa9qwqVwY0iBjJ8Hu6xIFmvesHwq0ySH0IqyI/Y53ee2hhju0xWAz8GishuMv4/apVLWQ4MbmG788ybGRxePWqYx/KI8M1fUvZGRXmtwAqEIaakewUVpL3QhawB4eR074Yhl5gY/ElwlcxNboUVayqJwgh4BO+/2tAutTDCtkzdLMjH4JoDpMNsf4GiLVvlSahU76B+oOlttcIm69oRB5BklrgbPCwqbQldsvvP3nHuFxBAlunefMMGZFbTd59JbO5UAkAHQ7XRw3MWDq8B3V1uCF59r4uXc+kvYFS/y8DTpQGKtO0RQx5yIonoNCbJjYWtx+zMACXoXWkrH03IQJMKmPM3IMbtMDMxIdqjD1hdaQ4dAnVcCq7ZvcbIThtCHX0+Vqo9eHoqA2kBtZLRq5rq4GG8Jm7o9mrpuVTLvym0goJuK2KQbF39CxlTG8eIIRKFQNhKC1XtuTGiIQzd14UsHWHhqhWo8uXHGhAvkl3ga8+5bDuJRhJ3ndsNE/tnq/VlJf329ATseDCLmVEDRiqe7CJeeyvMLgN0oE0lGZkmf2iYfRpB0zdkj6EpVdVZs2f/vRTp7S0ldwvV0pTDj5dzboY+nhd2hzR1+EnLPuUbVGqotTz8BWkxo9DpoGkA//5ZMeCkqFtKh3f7/UAWC5EyBZpjoPN3JGtEOdBRLX9pKrvY6tqpwaiGAHA85LywmB3UoudiGyifKe3ydIlMltsSpgc8IESwQaku2+ZlvZklm8N8KVl+ctF+n58bYS0ex63FfYoJEbUzJMcyC8Gse7zfC5MFX7nVQPWRrJ6waRu+r33KKllmKp1pqtTH1SO0N3WTP8W/npELnG6A9RnnsbtXO1WhN1HuyT5yv9KRaVPq+2EkoweAEq/Q1SGtJBX0hxWaK2UDRb4VRMHC1uDF/CVMCcfvTOQ8/ihWgrZtroDQ8J8TU0ICZVCdz3duvw5/C0eCLB5szT1EsMY2x1hKpnfS21Y7SCpG3SYv2Ii47kCex1A35Et/7MMwilelxgrwDCsXyObkepVwdrBwV6YF2qd+jMj+H4mCfhempxwCSlhXgwhS0svSPmPPAJOU4gSmcVktfs/CyqCKLzpGxHXjdcA41/gWVCeYDdjOEirh9rUIy8KlIspI+3y+XNdWrRfH9UkYQsjH7mwvixOQfc3NUvMLOSnCe4bLZ1gR4mIiaGwR15YT+Tl3AkfHu3Ic062iPlWON5Sn6ZOBE1FnGi25YOiBCdDkF1vGdzPb2SLBnucVnEqKfBB3/0KcMrT6bDApKrPxfVQfx7YJnKO6T8nddFdPne2sr2Joz+QJ4DR7nnSBvu0VEZTXLAr+K7OOSJwlE76WYT/oHDHM4LivUit0ChnsUegNFwD7zO6nz3OWYzDaB+XzVr0c5wtpZP1IYRCs20L5jOc2P1dzV7WHErHJ8/VhDZ76d//2SCCdjv5kTfwXXHsfWRK8jMV+TZSmKlKgq+pDd9Um8Ao5ShvGqMz6TThFihNrXUL2xCEXJ1ki7xL3fTTCgK/SlMt7NYeOv5xqIdQdc7tSjYt9y76UbY6bVe+i1H3ppaYh2+oBaSDyzbInglXpHEWS4yJfh7kJxXV5P2u+LeOIzmz3xpZJJCiRjdW/Bl6jbAgERAoIQABPRyc9I9OY6rL6uNAQtPDR5Idnxvsr/kLjKr3IPkeLKCYrfZFezkr7rp9oK5b8V5DjrRTNQ9+j6CqdJDUr96ocK0wvpx/HR/rCYmqave3QFmKoGUEXvqgxVRrd+sjgQlTY/1X4CgU4OYSVV8VJaV4TgLr2XWoc+P3Qq+QBNT0+E
4IF8BkMZp+sVDYdvloYib8L0urBn9SZZPVGPsQ1KZZQL6rXwWJ4iQUMCYsrJRFjWWB6a++UtQVMzBgKXpeV2j69z+xlqM0Bf5QO1fCoWfsOFzHh8Z7PoJ0p/2EmR8xryZsvu7fGgNXEXVF4fUrf6i52DwAb7ptUP/PPAnp5sg5lP11byyIGLEM6hCEKbJ1uC77oNY6q/xWowBMHOROYYXcqZKGWdOo7bLPSlC3EYPj8SgaIGW7spy/xv6TCB3BaYeRWwb2VQEfxjAK1sMVYPASBhqr3jWgoKeOFdoYJ7el2BLqprHod1Vbqr+2ahq2Fjt2WIGt3mjmdb8WnGht3f7xfzbX+CYGATPzEKOOHojQJ0lpptITSm336cwdW//4qo4XdMMo/cnO5cKzbjgbAdI1eCIEaSIvmpRgs0PNQuzSKPZ3GBqvPLFPeePeOZsq+IdNXs5YqPTw7BdJ3Wm/VZzZACBSbdjP3Mbr/yG+qEIx2i0x6I690twqy+fxdKy/HHcRGcjiBMODROq+cpxRROjxHqd9/8udNQqjqcg6j/iMzOiQv0FQ9+iEyEzk/jjF8rmFlp9FtSKe4FJ+ZgNfKFAdhDVt+cu5MpW5NZJ1wKkOM2xEzSKZlYrXx1MQbEqsUb6uopkHWoS435jsGrkzgjbDUTN2SW21o/xaiSJn7/27oUiezK7sKqK70Sf2ixdqXQXwBC6sBItE6aK/VFR+r8YcU0ysxzj7WhJB+CDNatv4d4M0oFZkXB9wZ7GIPD282KqAUM+TUOqMnpLKftZAEpRGC5ck/keBU+J7/vGO//HUKOjtPsqYPPV6qY1Pc6jrUn5RkIxzc+qo5lSoae3DL/e/7a+SCKN97Elac/bOtTRy/of4jYf8HgNQVd56NxQeoy+fUboH11jwuz3BSrHmBLnbljxz42gglBRFY4Zw0Vh35KISziV9yXqj+a+72dj1iOXCc0w/27E3gQERaex5m+8eGTxKb1R32HKV9Ww94UYDdkLZwW3g7sG6uXO9+tjJY2uZk8GHFxyYlCUB8a0URVNVMYdKDHqTuhrFLOv/CWjCBg92VB19bwSGFWEfwUroQlZa9nU6FHp0a9SgpLvq2VSeReOppoSngAuft8vxNUDXeDRfZfwf4jtUdp14zLE3QvSU83RKy+Wv/4jC/Y2ro7SqZ6wAWIlYr9Js1ixbOyeXu7e99D8sjWZbB3QMD5zYpsW416jOxZ0OXKrRZ9om+B6CtGgugjxZri8us9VpZXw9Q5TDcW88Ym6Dersajy71qnndzvo0K2FJBW7EMi64J/2lr70yAJADNU9z90B3BK0X5junIBbp88MfJNKVjrm7VV4DVVk5YdmpMqxWUVW/xj51ARIxmu2boXSpUxHs9ZXAoF1C/OoIVcM/7/tOtOERzUFFRClGsw6yeTEPvPlYY6eKnKQJputuCMD/+qbhj6kpxjclAnfEJMr+Wa/QnOLp+0/Lvz9gh5hyMdgYCBIaPe1rJ7TglrqsdcoIjHObvMm2OjeYdZUAHB+Hgozu0H82XC+OD57wax1n4fw+YktMtgobt2YRENRAcyYReehwfMKM0ahR6GVIdRCXQ4RggEbyQUoTArKSS13JpliMLNEhwocFsahqxazDm//tadLKCPEjnuKrWGXEwiHpJBOLas/J2HhQEQ3XKMDCAGz+QIfkjxGvbhYARpBTgf2AWNoj1BzWwPWn1vUQk8v7osEoP0s2kaSencOFlPfRzkVowKJAnR5IZ/xv6lau7bjqsOnMutoKjJ3lWUzvjhuvAHUh7AG/t/Uubn0ZdZalVIvDR4xcjcRdQSsyxcVKg5cw9V7e8fOFocHlb/JKYUqWaG7edondhueTNK9n4YAwjgykPhcj7+aJOWJAP6tTlqIt10lC09mHIkgfGdEU7gGmODgXMj6C5bW51TGKi38mtAs4YwCiUJ/m1x+yGFP3LBsB0jswMxSIL1/5B9djzeqbYRoZAUoBuS/qPzDtSNqOO7ZLmCb2YL6vV1x9nCEUkmIvEyDNB83MxZeMMv3cIp8VXPx8X5U78sLfqTHlq8dZnhvGs9zwVOUk729bfGLuk9ZQxHuFwoodFOUMLTdgJGPaXWjEaY/rdzKnuN5GDhtJ7MDqipVFd4O7PUNCjeqQo9hJAbPRaCXh7cweIWcBkVl/0df+Y4vGtmvQEyt4wvQyYYCCVE3J5m1UK60Uf/DB3OtM08Xcr/DiRG6zdIUVcdpQzRBRIJLUoP5vDp/jj4qpoh+bsR4uIQpvU1ityWixGiAAMVZuuvnJ+G/A7mc5naLN+hH6wELoqRxDbUqNerfxulkEKIpPwiZ3l5AI5O8yLiG2Pu9tPj0QoTz5neBDDNyx2EyAlQh6Be7hSZyWqOuS5YWbs+h+XVmsNdQaY0CKDsX5NjgmtYeh1KF+RPYTs44982RosMVUnijKP5LrtM945zk38/RZ5qR/Wn66Qm2ToKEiTnw5wQFFx86/lZPeFDQKpsxx+qi9rf7pxVALvl+p7vehLrNajnFDAh5DvsNlWkID/jgipuNSFIN6TsLuMvRAbqWWJBpOOVaE9Mj174Lv+/C75EJPVMUAkzvBpr2scTNl9sSixXgdFsc1TZ3zXs+vV4AKuYjw3Gq6dmnAj6Qu0XaYfgnGZqz4lzYJIff2mP1AAPHN7rCfnlza03cAppazc1WvTqIC22Gx1Sn906cdcG8LUobdx08sXTVxi6wgyqfQUuU+JbCpH4eoHFpUMifXmGHRHciQCytE/UIOKTPX1JNFnRKmEM5DYhfD8/wi5nHgNS/L6zHqpsrWfu5UyvumZJ7XA/djiZ37x7JdpTVj/8EgIn146AYRoVlS+V1xWDOz6c1BG9BUN8ZWdpY/Y4W65owEN19CNg9eKWizEQD8TH7X5rz874WVlrsEuBOTN9feYylhT0uyJCAPWX/ARhwX2iTSVsIemAGwI8tvoqq9u8vXU/j0+EtiFYjBm+GTo/E/GqLjSsEIc+B7RnARWTjfMNqNu49DoGVLUtvQWAoZlYqGLGpvis7PlO1tNIRbhaXcSXasBbO6DpASLBZwGTfZzpm3D2OC60v52f22uwJx/2tHRUILWXgbmc7/kWnkb1FZbpUSfrkxiLcX6cK+3RLT//Pnbk9wva+noJ/aVFb9ldBkkAk4iX5XYHSTWf2IdPe5Lz1bBB2Y3WtFo0MR1LKf46yQncL+FbzWTLRSHPY3UeRhVg3FHkH6MnXYpov8hHwZ4FrJaT7LMmdj13DL3HF5lwwYzvkclyUJ2taQCwnXPlgXvWRgmYfNblc98/yn3m3wWzx5rS4gGFHqBkJYwTqW2cGuRDVZ0V3t3+UfzqIJmK8nXpm0GKjZT50PfMjsS6+uVgTHaQ38HDFvpBM/1z2Sh2fcGfbkxVBWt8Wwl0Xntt6tYYamFGfqR+8W6VRVQJitb6uZZiA+wcbO+kfZOw55VGHld/USRiRv8QuxGe95TZV47f1CcCJzZhWqiaNH65DLsLAja7DeNwxd6CHaDAik6S6rD0FyZ9PQPaICPPI4/xAo/0ZVnd/yEc8OI+3yM4Ks+
YgQ02Gnrl1z9lv2Y9zytEPBDFy8iWYtiyXZ8i4U7AXOGd5i4h3jKPlW7h0OkRKiSSh4TgO7dD+5Sxk5kAMUo9nxumcCmTBWL6i6yRnsKmS0nkIyZI4wuEihk4Icof6JsPqrvXxc9VgQ6QWQ0FgAeubKbqIFgV58l2JK4Qfv3JKYrKMS/n/BCjRVZh3DfkTcZzQg+m9Ytcze7bv52bN0S2xrDITaw4q0IKPgmXI5Nwb4HA2t4p0iBHgoqtMbU2tkoVyh16EVnCwnS/IhHi4HTlcKSNDCWp52NXf0cWGjgxDV2ds37QYD6JoLz6Jf+NIUElPQ/CySdVnfcTHK6h1xjG3K5OoeIboMqJ0WxKdRm+Eu/2OpC2T/x4i0YxM6pthPXUQ+tYnjYd4csTbjE9aAVexoM+ARW6WJj/utUp0VvRQOiFRTLDVNJfzG1YUDXq3u0cAWkezq9q8bny97HBHP5vnjzymajF89NHP+bjZrvPNigJOXSPybJPPFLhTPZGjryD+78fT0VrvMHkXutC/Yqa2OEXe+jYXOhx5phxknCngScLmIudX2c/fXXxxoLeJHD9Hjv2ASlDszSEuBFDawPEMuQaNf6sjTi3PLgOaVZDID+NAh9sw3RqcnQjMcyR6ojGxkDpzxj5VBNHxbPXNuAUXPNkl8KfkAgwbP1qBWbyHAzUBg0+rBcRBjnD+WHkhiJRqKW7RMyyGMgpk7E2p75ZsdtjDX1uzxJ99QT+q3qEoM8qfAMniuUoxeVX4WWaL+eS3aDhE9hJtz2qVJjx/oYu+X6tSjSoY/3OHlum80NLM5h/tVBXi8kSFmtV9NkiGPXT3OVpEodhhCXBZOblOTOkolbawoROX1tJNXpNAJCxz5d7jkjPM/VUoBrvtXcfMBJOGyAgrfCu/qZ787tsi49ZwMKPjW7SAWzgzsVVynVS3SyPfUs69um4QESoW5rMqbnh0jTRCiCGAjK/2jDjhqpA3r395j0TDlQh9goCzwzYfEyFEAPspF73GcEcR2eb64S0bRjT/SUrPrRFUSV0MhFefwXwd+mv2VcF7Zr8GzlR9fOpngy3xrC7GkyeSz2jNSwIkpssLpvXPbG4mzXs4WBFDcDb0hZmFHvU+fLI1+Do9lQ3KbSyCXxA3VoveSEv7spX+9EGJpHjesN8cPcjChjVozfOzGWDXw9xRAFVbE/eLLrik+ftGqzmqm1zNSbXInJqfFmgeJAH95eS7j6r/kqO6b38rKtMIRMWj/2xtArTtpqmEbF7JgQNM56dIsKgf+Iea3XeV2A5wa/d1EMj7omPTUezw5beqBExgShFc5xkibXHuSTLD/ibQTya42F514GH+1CpmXJ2MtoQMBv5mxJ5l+HynS6i11kfku33m6CMPzv9H7vsO+0OMgK9zf7qOIPIN6tpOkHXJPy6ytHkPNJoQ1SStUawwwddGGOVu0u/IfaCp47sLMqIoUAF1kZSt3laLGeW0Y3/Mbdb5j5NwK+36XuWUvJs+eHIKRvc7KqcW8Ww+ReglXFdc9HGmUOHV6t7hQ6YT059ThcDZQf0JasLJwFPAo9BfHL2sgBUdF4rRt0jLBVNaXbcwO+tg374KIf7dHcKKkPQ9HT0fzkBu0+SlsEJfpqMklksImd6Ls1clJSORvKAnzcPvSbxA2vcGg++Lu2vdqSzQXD+2BegqE95A7h0Dd7VH6AvuqosfLpuarI5Hs+FX4H6vpxMa9lb8RTIi2lAI70CgggBALr8nb9910Az4BdF02PCn0uM5oa1W94D2wQN9sW88ivd2pXMRlht4y0546P96ud8Daxtv1acT2henrCw1S3I9CpR/0HDoKywEzPgN3JQsJhDfsvEhRCrKnU9miwvjCe38nlkMG9PVZmVTjlvt5UWihzbTnjv9nBSnQ6fhz4QqqRBAi8Lcmc6IKuz7CuROsY4lNCHW1xLcVoKJOTOMV1DUKCXn36K4bkiYE0lhWCtAZQBVHkJWupZpogjd5mr9qy8IfXF91iIPKw02XLgNiclPX6q4r3m98aMD0c/slvsIH0r5fphjLdoQHYPt4Mp+Vum1cGk+ogmpcwSJnBJ1qbrFvlBmcGb5LoMd9z4qhvWwWVOKw565kyWkaB5WO4v1KFx67KVdPszzAUF8u2Ac5RIPY+4Db8hvTCovDH2y3q3mBynYJX2FjHS+3Q02E66thuzHfbxHIKHSazq5gJWzr+hYfal+5kZxOfydFMIC+jdRmFajNmoKFM2LOUlZMVAHPVTK40DshixVjakvEMUCJyDHURyydgDbs9W0ElSYq9mVMXF/2m11KY0Eptzvuh1LkFHIfDOdUCjKOrsd7JeUqF860WPgxHUnAas5HKBTM2xNXEyAsQXtQk1jU/CxKgLr3WDLF4eQ76a/BO3SeGhytpasDKUMQiqXyN7v1gJeBQoyiFitC1oHUVVTg7EgJfN0B0dFWKL8iyYItWB7xKtXHPsedU9EWRfghBAxoAqf8GLW0905DMHdnIQKg/43iaKWNqmNqCVRMKQnShA6GN6tOxtvaVV4WRNtwtEuOP2U42cNA702e0qFtmWDBjARuee1qhJCuklkYdDFKrzn0MXT/5xxNCtGVLeZCFPWw0uDUQu+HjD8Izc42fnVGS8fLwGLjj0Ajnn/MtVusCHvUFJSPLG8qsCXBuhsywmtpZGKKe2EP+KKphBFfExQQJWXR9tbBGIcygK9c6wj3Tnrwii8D3oIGvEgnNYWUL0pRVSs6tpRwzXwK1el1wAoU7rUQ16UoJQx01tWEvxN7wTsbo/V3IHp8F/UAMNnK1GQDZqn/NDR1Ln70yT56kqXsNf88WI38eox55vtOCePiFmpHddvRuMZrmSu9FFQtd2rK4eDMrDuGxFJh63+n53iLFlCBbNcc1XV5CP99B3STPSzYHPS9n0aCoiDL5kJ96LelFEkFqr9gOhG/3JpW7rGw30Mv1rFN4dFKn58dSyfi2tHbz2geuIVG5BEhujxvhYg53CC8v1agYd2zlSPQnCKU2efI47iXbGw66l1ACwLWsI21pR/HVt4YyjKwy8IWJoNPPN0AjcDq1Czis6kUXfmLRDks7DciEdhOqT49zQyn4hNebkFg+VCs3Y1JfMilRYdCH5aJJn6g6w9wqE/qCx6wQuq/7Y5ImEpKEYme40uqJMjO2oekz1FhsZ8PWSku+d+Srus0pQkB8MMjHoFrAtXi0QWY1y0wo6Ci1kM6T9wbVLmF8hXkfqhEdB+RcyNqQeGquNxM6rU2JKvy/HLwO+zTD53CQC1ToYV2+5MCRr9+N2/CbcifMUN4VIEn1Eej0zwHF/yN2Dc+UYWiyEQtlG14z2hlkDP0CPGq4tt8VdftJ+HvCw8DXvTWTnLnn1Zp8JOcQmEeP99YAYcjKhKnol+34BK6OqlAPxBhpdin+TRG05T1CoGS4qDFCdS/mIdCVFv9g2/QS1SdUQIS52zaRHnQQCSCWEa+ZSTfRHd58wlVwt58M3tCbGyNiM6wA90GWFA+zPn5OSuWleAC/cHp8uaJ5p1tC2CPYxbU19N/pQmg+fwNTBO24wUN
+3zJXC++eGtiFofpQjnDWXLH27+oIG+YuutaWh1jf4Jsf3HybnAmDBUf4D39zprOur24+buf+h5uDddADdFHnQ8GHo7txQ0pEU1Q5L6tUw7JY4zVLZ7PF04Bl/XLIRHwb9hGoAEGsblcahUXa6SWq6oQmyoNO5l91ZDZk2ovSdq0kMrEB543Y6Uo8UvPIDgOwvcVhjrx2BDy7H0YG8rMIerCI+4mXi+xrU5Akhyom5b8TFqsEmZN5lvrsdcNtYc4/d7qnkbVYBZlx2MyeDC+ch5f1yVBY1cLnpjFFHFUXZFmpzUrhXPc20vgeXnQQgqQtV5fbQDYUGz5KIe8d1wVGIVMut1rmRa9/dspSJMmE24mNe/K11eSymPBI+oSmwmo2KobIOb4otMXXGiNmwVSN8Yv22FoF3u2zgpx6esCfGLLScnsXOpCf0f7aP4aqwqN5yeypzAlhF3+yakuuv0m/dHUEhxuOqStrxEG8ShJv5tkHsM3V1WLRkpBAadXPy6gSysA265grR8BX4LbUZnFqvoDDNrvRSweNv2HddvI2fcgltJ/fIcEu8Qk/WNLUUWJXdMRbaUwO9IPvhQULFEUCLdqvK5bB5oDnUQQ3FTq7Lspp/naoolLMn7k6K5gx2IxQnpq9+iTCzU/vrKL+O7Mi86AHJxPCr9tk/MPEqzaH1SjA8zrPqdZdtyngTEMn5ZPiHV2zMUuWPJ2xXT2zrpyx7mVXJdl0SE2gbnOTs2/5wFPy9aTKynFtKxZB1y1iWEAlBWsTnoS8FE+6CBZH01xww9GRjoMi9xDee+wXV/olDo/dROj4RPYSvIeB3tIxorxRR17YjyzZPssKDGTvfzKM8kqYYNE/BqEKBKLCz0bhPCCWxu3JaVJomTVJTrFy9JzmBMy2O3sgLRDl6X7vkqOm1AoIIAQD7nE/lfkcKEttlB0HLSKT+yDGo8kJAR4zKmi5fZpVgWYK30Aib5HFTA9BHVZElnhTeNyvYMdSO1FdtNsa7tQ1/0rD985d/GLXe/f25PAbEsmgnFMmc9zSmpLIZ5vTxIC7Bk73mqwwgZZxSNvpqurbUO+787vMn2wKC74fJHC6NF5FMFrCypu4B5RLs6C9fGjRKab1vW2mi2967gCZrB1celCcgkBzN6XA7tvjDozDz7JU+x7ugmBx+6MKpsLc/FPrRgEhwWdPIsV6R+vOqRugeTBtr+NyvFhAa639l/e9EQwpEbVJgbNg5okOZliYDF4UM7YADgv0aKJtir+4xN5Cka7Jb8vyYIAcchy4cjz8IDNK3SuvhRmPTbEOs/xwZpoN3YqUiARI0RvYznaByKpOJSpxzqqP1W/026K6n0KagIjyQht6p5ElpsXlIgcH0fwpXseNYl2pQAzj0jAGFaJNYSBdgyQdZkoiUDprKUm9dZfDL8m9FFpoDV+BuJmxDe2XUpLfDhTnF5n/F9wYjmd4Vhfui0HA6kh0dLvOS0EZEvz4mT6zD7Sxx+T4uyZJE9nq1KOEpQTW27mzJad4jXJkiYe5C33DSEdOpwVAu8pIYFxmcj9uuNHoK2hpYcst/wYuNzgAHB9LuJaJRFLZSXN+IVyBWU2S8iejIVYzAKhm7Pj72hIE25Z7oQE/MniMQeUgmoIlqxbSpnWho+K4koZGNIyiGv3N9XFTjN9YCdWSC4AVuyfyKa8c8Wl1cWggnOwhj1CkFeMCK+f02a64kupllLUL5I2bzC2drmjpdEGB8m7KaCWl+W86pWKHKltns7u6Z0TlEPCk2Y2+ypD7GEicZSbMwAPt5jpTfxoMk2h9ICzgDbFPaJTtAsYNMiAYz9Sa+w0ELdSYoGD1OqN/ZkPE/sGRcXfAk4efEkfRDbCU0hiH2HMbKFLhH63/RfGSbgeYSGDHTs66JOJ3htSh1arYOmkwBB5v33cnVCmRiUGgE4QijTnMmYLKH42txfzD6fU1TJKUr2woazXiPvpS53tgSbO/zmBUE6fiFIaOGpT0iHXhx38sDX21VPVY4zwkYvmFNKliwgnZTZiThCNF8e1r4W5SlOyoCm+cc6UnPB1XOYx/Nd1W7Njm46rL4rsfZ2w18vATLl4ofn+6M1dgN39FO6ueKvZzxHUH1Gp2J3Z1cphfke3+O8NKi0BmIe+TjfuTzCt6l/rkr0UjKqXqYF1OedZe0kwkIRDmY6cY+gQlIdIFOaefF/3bBu95mAozWMTtZZGAPrf1QM52AJ/0fZKjoBvvZTVbeP6TnuulOcahtZVGDs3Q2Io9d5Y/c/adXwEyizH19Z8dV/ImY9JdmXDB80wDodoo0/uL8Ig/2NslKCu4KtxjzLwgKHhsz2wWgjagn3AGkD6nlVdElCPwRMdHW0v1Ld5RzZG+oXD88tXe91cLH7YY6k44pB86gD2EauwqDPSk1Q0TPy+Fj8sLEWwg/prsVZWMvwLvGCRRCCUWiDJhuWT1dzOxHTcbLJSAqSTaRDccvIrFR9YdqqmZtinnSwzByzOG0xY4uO3j4EhK3GVpi6L8zgoEqP4F1vU1EwPn/W7VfsLggBBRhG06yk+R4zOBtUNOHi3Ra/P/D7smXKmgR5hnz8tfObTgCO6FdIZAnP7DbS4bw1eykk55rG9x/k76Kd9iB6PtlnTl2gqaCcx/JX09lhWNbXL0NL9J1T+aEyJiHZyViVcHBKjXaUSlf8yYbuFMSV82iT/LgYLSmEb+tsS3bm6Sa1r4uoOrET40Dky88Oru7hoZ49f1HJrGLhoRlDO4rCnXV7QABqwAE5qJCDZ0Kx1Vvs0WrK1yypHAjbmK9O4+98Ih+65HhdXoR5Ds2Yj1ovv+d9NWBMEQpLEpOdtEoZ6xqAr1DDgdPVg5wSPtEavKOEfQWfPERqCQC/oqcO9rMbwEZGx3wcJyIZZ6jbupWGcHmSu3bvb0sJjdX69wQGL9Gl5WzR3xrqMYDX/ObNKml0QM0//SX0+j3FhMzMzwzqDc79a0FnXjjMBloIRVWsFdGqt5ZF8fXSEkHejycJDbyXZ2amxtPN9LgOZ6GvboFEnoEpslW4shx2+zO3Q/u0YYbaLGZu5zKumObpau92s8clYwC37htg/IT/JYLUVvSx6HaWj3GaVfvFlQ2/oH+Pk3MOVAyx1GXpZoOtjcs44/U1fKVIIAn0jX4g//wcsdt9jdbdU1PD6UpH5VlH8xJ3fNWxr37R8nIw8HzBnbrgm6PWH1wiWzbZSR5dAn5WUv8MS8JxMKC+QyNjZ6/kgfO1Yt0PV1EPJ4ji6A0F+akKWlYVXdbgGVQyISsje66u4fncZOMHgVwlF3X2sNe+ybRMUTysPsTAmRm2YUvIX6b0IGL+CcSWMKM7PeCyX+utfIn2IWZ0Wa5mjN56TRFBx0b9Xdnq9gLbx+HaUHSLERJloYg8jfeshmUIha6qfb7ywtLBixXcJTQUYtlXkQJ5pzXyYNWqv5gKShjAxsOMxvg/AvXw1g2TKjq/vZs7X+lIbghfEilIu8UUn1r2Lkwak0AI4si1prjsqNCaxduiZGGjeKiOlDA9c+72AmrGj8hbgCyzOq8mAYTlvadCUH2GRmQQ
nGVvw2pxoHpFFFBx1ZPWmmU44lnBjlWxPfQ2Ic9u1yLYHEnUVYTxDKHK5bT8940F86YFfjozWK67PFKWju0iuriL7cvbi8yxyeiTwKCCABX/mhaHRoAGGl0XRgu8izYQk5dgoWVp3YgBpI+74EFlZQKQgL8b/JvosV6WV97/iSNYNKDHGGahuFEFvroXpEE20rxxXjJvEVFlrCuRBbePeFQ1PNTI19GOxtgFmASsOqTenElUoKioJ1INJKggxPRWCTtnhmeRP6deD+kvIyJiAEHFHISdbUFgdiM+QyZhAnLiv3RFHGTyInVFbzgmCXxOEsOX3lIEC1RexGW6AC+Hr5XE3YT7fQD1HSEjSjJwfHdEd3PTyucVRsI4ftdtyv/X3nCxwswQekSeFPvBbTvnC/9WxULA+IZcM7UT/zf1go9AlfHmbdvF5meQN17ueyxiEhbHC9mnHSkOMiFkjzkYQUz/ZmNdAhLhGYVvCfTgOdjGSf9vgWoAsysADZj5cKd/EK0TBzLmrLqVgVm7PxJuC1zvxmA28GgGN5DKrANCP8Ky9EuXchRX3tMZRX/03llAtDAhJjln0XMuH4TOvPxlAYMEuXMzjM+qC9r4e+CgX3oAb04y+xV8ytq3EBJpxzU6rlWmDQlVgeqCKbpIRjViloToNyKctuUcrQxKBXEXbWef0Y8iQQyUSlE4RfThhRc+D2uCbLV9wIXxGBgy9zp+Wq2ob6a7AZDpvMh52GgtjL/HU0OZw02dF8AxJuyDI8m3FNPXzvUdngpbd4nmrl5H2PZIe+oKCS7p8QLM6064IKIulPYwBBkeWFyM3bNI/0ZDa3U4aaePJmluaWIQZRhoGtjTs5Ty18WkztdbkfubFXxNy9qnmgS8V5M7nNCFYZr7C3U2UcUXJM+GZC7HFS7voSr15JIRpxH4gM/0kblyAUibAg/pxjI6x3FOCWk6j6AUXVULGta+CrZBpzUys9H47x+hhCpXc1clO9ninAazS45Xhyb7Bul5YY81zFjMHIyW3ajl2NgEjfOPyIwziYd5qqiAILL2vFqgv6lYKtTi4F8QWSdgEOCTuj1AWH/A9MFiabM3kgfgi+RkFSM5j+NkrUGSqGUtQCdm+noOZA9UzCc6CmNJjhYgb0MWgsIfBK1aRaYBmfZEgAZm5aQmCGQbSVRNosibkq2S0WKIkswx+V3vBjiLFl5IT5WSjrfyZnAvYWPqB90dBUGpLq5xYP2tyD/ZaMOVl5xmPS/b70VVkdTFpK8dF6u+coe+COx3G1BAPbwLyHSI4Ta8xbBQd0u4meGfQKOjMFv+nJZI1UdOtyMOWK+ch1Cq9HCVeLJMRisWttYTRJWwD3v4thf+wS3lZRXNcJe8fVRs/5hDPVlEj331ZDxQ9kjT3ZInw1kb/GrmBRCOmoQMQncJJ4iSXBRiNl9wTVODt5y8p9wW/l4/tUjGGs6vJuGpjd7tqD4RiMzsVT8JATcZdxMOSImx300FwrXxh14zDJcUjLSR/MTibbiZe4VvnXyBef3XlervqD9sdrN6p9/0d6qyq65j1LhbyauEt2AFVl+nkhCkGNQG1AVXFSJ4NOgnAt4D7Plm4mK8d6hgQqnlbIynRFSMoGXqrRSuBYf4VGAdZTFpvruKZKO7bxNX/wuzpTG/l8I+nR69L1oIDmGNnit4cfvxWO3GoTJp6b81gsVKLexavCW2e5wFYOoK/9yHTu8j4AZYY3VIX9Ic3uWInWJe1O2laC0wDW9eQTuL/3g8X3yqqAB2tWyDebSn6e67cr4x5NhBLqASgWimpECey0adDrVCgSggA+dZRV6fA2niJpsAhSonzO+P7/ScTc6b/SYGjao1gQpq7nh/vioPphvcMOQvYRt0eH4Z5Xwjk9ZmzfpvxNGrdkVaBpXrXWs/+JGAJRwFrylg6uRxSs/xuxL9PBFtmegv5x3Z4Tx5SojnYKoTCiSzyFPCuF7uAEeeReGmGlY5m999oVwwcDwxKjiShh44IIbNSXTuOjgJgi8voJhFKq+rZyC7Y3MosnbCdLe0oX5cXgDSiAx4emb0L70D63dhNdBSMRAzIfrKilZGtk5CqcJs5vmJBTTDC7OOZBDVQ2fELUj2hc4p2F3S8ro1oC1hfbx8FEBDoioatCFGOPID+bXZlK285umMC93t2jQhlM6C4GtHSUEp7r7S/PvRq6pLpwwiGw7CKAKc4BXfPa81igg3qEjCRfeRywkkUpd7P6Yh9cUZKh0JawCXY7bi4WLCjzbEvq6M7BXU34O/uqgQJAtv3mYLLMkc4RRPytT4TzIUxuN5uKuJOkx9yZViprAy3Nb/kyzoPuIOFgzPIrhO54w1bqvWMMv6MW4cw7sf/G5vIuz+aNfRS5HqGlgSL2cFoEllTrxeU6JQRQqy8t/kD5nhJIA55++zc9j8yAHk/sJY0DzJulv33tQYstVofdkSEmUFmmAYMrNVB8BnguDd2fKLOpeyfSw1stu7y5DsBjNrzi+/q2wZr2naA+Fly3FEXGHySjJUGwWz9LuCYgGevfZyUT9aTsi5eufmlIG1/PJoQFy5Xud4TGAVGPB2BMs9/b1DZpbMcYW54M5Dq2eqrsfCgTLZ+jNIwopJJuDzLybSC+EA3RvbzYRrMdCCvgzQbgU7t6+9WTggPoS39Fcq7LSFqB277kIzIXQm6hD+zKECmHmPN9ruEvZ5EWdalz5ZCj+NSe2xXjW7+Pd8HYg3Sx81IllU2azy+C26QDiGjbYqbvNU7DOLvY3rQjUAXVJWkIusxfVsQmO8biXxE9iNoDPNEARvQzqhNyExrr5kMmVDbgbD5+c9/BeI2tmV7SUp9cEkQKCCAEA3gJknVFNvZgq/soq/qmChnRoDYp2sTAS0OJlJwApcyHNsT8Wp6tzDQNdbB5S5PTlPIsIkZVhMrtcMzBU//oa+FB+DUBYfzPrxMH9/cuNgGEuuRJXin/FC4JCy4+M1MILI0YgB8QZwjuJ7jCCmmiDM7xpdcPHfYUCN0vSKeuwmpxTBubYJSnhELsQsur8nzU9MpmJB+c/Fzp5PAepbX7yhGSa/p1Gl5G9Yd2uUkSyuRwLN2Tw2P6vu0XY8BRlc+VVx+mpVBMGKY1xj91tlj6QkzQYMhfRx6oONd7Z0nal8O/b4gYbgkHr9p47paKaArpmVrNw9AoqnpxM3ps7lNaszU/3uosbHND3N0oZoLqhBxpfkquE1dSyb0Fo4/An2mW/SzjsDvHi4tUzlvR+gtpF8ZwvsVpUbxTue74/wT+iFNLqJSu1aLpe5MnFXhgjm38nPlGqe1hs3TAFFAMQZqeREakFkaJRx4FLVXZMWCqef5Yu0hIl8aqH5NURUiHnDl3SUjb8f1dvNiW8CQcjiNMPQCrtFzBjBoDsgyltgYqYWsbcfCgvBzquvur6ocDqeRW3kMm3nN8vZSy6V11pprsdtOz/aC6QuVsGDkEooeUXfqr4exWFmbXVGKJTezgc+EFdBKa0uujJLHuPOHuv7lHyaT3RPRxn8abcdIe4bVJS8II3jjiuP39P+hqgw5qXaON75djxuJBU
HTCJTZAhL2FiT1tB4E6TFEJpBLjL5A06kZh9Q6MqH8iCnqoWJE9wmxX4WBWNm2qLxeujMASotv7/0b6GY1t49JGXfQ+c6LQY5mtDPJ7knKtb/tW77v2THFpaD0AjeHFRile86OtGcoh82hPaV4hla0GSaxiR1TjubL6a1dgNwHs0SCQojtJf0331AqxIc4V8BUKQcpUBv/hcZV9nnMtba9ZjFtsi0hQg0/3huwpVDKje1gwHXnzRPesWTDN3QlM/pkEDxydf7yHr7sRhLhXF2rSjB0Vnogq2Imw0zFRHfDc0HYxt3J1nc5u8ssX7JrI2F6Y9M4oKwh764xTTuNF79UbqV1nqo/s18OzTr8V25Nu60r2mblxTUhFk6bvz5wmzsv/GL/i41z+qnudlCkNDL3qAoQoT8uhaxSpJPNK1DplB/YPLF6lG7WbtyGmp4NEBZzLDbTUoDD3060e9Pi7VxbBnX8wwptKZ6FZRUSGsyWsUNU40pZp+qp0kXfqIOBz9vUAxK0o+/qsrqe9Jn1SPf8O6Wb82c2LL9KMIrpmuY2jwUJa1LNUS2xxhixxUwop2GZb0YgUqozqzJxU4ko+I4jgoF8MKGAnu9x0pzo9IbABgYeisHVhIXHx/2vCq9i5klyofDn12h36FIthMGGiYEKSqKcOzuyFIMkXhGINwXhpwgWXbxFfXyeZnMjqYCTr/UeJPIK2THjsEckGyUaW/OKPqDQYZrgmHxZ5+sGgrJKBQQlIuyXb7U9I2c8yNxZW1L9IDG/RRgBQWVkfSQA4qV0+0vcvGlJ7E+GV3cGzbyzxYAq4Jwk3vNF63rSpGVsRCGyPv9LR4fsV6jMpX7NLlRSbIv2Gm+QDjVkOL/Ot7h82BByj5wj2eh/WRSrpUvdgp/iG3oPn6JRkU4w8gYHR+aIobX1e0f7STwZ3jWxZTIor6pxUTTUOsf1nZjAsFdjOVLtrf3IJAfKAc6QnkXA9krtyhleUUlb6S65LBsa5zO3WyBVHT/JOblK/phDiGlZc/GofnMfgRZkec+k8Dgd7f4wIt6ZHWTYKBRzzWTfav/gHNeZBNdG/eNL6pmb4ano4tLP46arruihMVIMH8WSmG2q7gcXbDxTyHi9qPKzkwNq/h+SW18WJ+9/qBEDQ5AVKsAfJaUd7qIUmJ040lL/xUTV075bnpkBuHb5+M29JAFJe2P2vULBtv3Jc56pq/lri35sSME9eniAzUexzUp/iT4Y8fFib8TJ+ZLQ5ezHDs4o8yngXg7xDUF+V8IGazHUMDICtl+IpeuViut68EH4jR7KldLsO5syRkJU+2lpaeh+7HUwXzBPRbm0iO42h8PW5rIDxp1iQKruVtnS+e5B/0P0OLD/JFReX2TWAEWMWGBHm29Quil/VHc4XQ8sMODvUb+hEVLUsv/iv9iVXx48ERGTiotz3e9zgv84SEZFbYjM5DhG2+CWwCS24OEmgYWM2P8G7OSuwa0RmmDPshoBQVf4+ZzuBxFBPVRLC0pvvdJMow2DpTRcKCq9CS4MG0QS1AH2QRCuT9VsrYTueWGxi75+Sq6tOcSR0CEM/MkLgz/KPeNcu6r8ywuSKbIYDZtoAvwOrZ0swTEE4F05yeVJ0CmTxaQa2GkpLPaxPSpMOWCHNF9Bp9RTeeGNAVzEcEIf5L5TK/ayA6eN4MGob3PjByTlNtxOTn5cIHkYSd1mROIyh14hMeZ4gPTXNqWwZG3G8tHz1GKDTflZkb9a6Wm0iUd2xPaiStKFpQSlm7zxyRfm7b1K6hbIQvs8ulciXVr1dz4w9Y62+cGyDbox4JikfONtmKEcsroixC2JVSgqVIHYvHoMR4mXX2Mft2Occ04gE+iCbE5wcIheYFncStlNeLvFGSjCQvw9y9PJ6wLI482gAaaivJIFgGxsvu6DRDu4XrwF1ISoH6KALeSRlMJ+ZdKQUAxFDJLnGPXew7GFoGXNygE6IexiWV/swbq/VLl2BM4IvboDzAhtERI3zLRPMLfEg4OOjAO9zmvGMCgggBAI0U6B4zfbor2UG5zkmlHcBbOc+a3/N4PNZLwcWfMcS6hzsU8v7fgM1sOz03K4EEPr1ULSI/Tq71XsIcaGPt124quX2O6wzplYsDYy40MBeeKry7xsaLnGo5UCqvCprelYx2zGUY/fuz2UJxbeMyM9m9uTBZ8h3rOuioGQgmRDhI+ACcti4kMKg1W0nqd2pZ69tgCEGt3H2puq9SmukNm41xYE3YkMvo5e7yjlWVcdQ93K3x3dPP8mtr6ckkKMhOoxDB3tsd69LTxXc3ebhD1u/pGhqyAvpXcPaN0TqjhNMKdnn+G+g7BfOjmO0FsF4ElRO5d/O7KrUs/E6vfvE4m46KeWlE1plG8C6Ukx/Af6UwCHtWTMQihLfskuIMz67o/YDOnJ7miGb146yd3E1nOjydRwUoSeVPYzLCL4R7aO8DCdKbmVnQyh/xUBSM1m+MWH/UyqFQMx+vFMseDoPjx/+G2ZvKa/GXNRoThXonVpAFFXUzEU2DzIzxa75FWUNU4Nhc9h3HLsYCG4hYYb2ab45cQD3uOjHIS1VB6tXKLbwBfIQFH9bi3wnUdmGBnRHAU3NEvflxmNFCejBZoLsbqp/niVr1BIzvmmZHOR0di07sVkKdoRGBFuLuS53UPOBndoQJre+SESEyVNwdN8jnDFsCQ3k4KZbS9d85MgoCagtNA9XaZ0kvQtwP7zBqVAwEeCn/cJG2yKbVMXOstGGW4TexTHiGlSCT0Q9lSAYPLJqT96x8vL5JoUeGaIL1h7b1hdwR14LZgp0nmROKzCKovASkuMaPvSDv8kA3TLG9mJD0flp8cB2y3+njj3j8O8aY/RHx3qNJwIR8djGnmcpw5hjzFA6rbx0zj0UCc69ogNTbdeh9Ia5Z9RMdsEUkBLj1+AABk5AV90xv8wAUjxzpflhR+fz51wsvAL9CIPwvJIvbzSHZEPOgKiW1zwOkO8NOrG1GdyPYgD2JseLxfZQ3pivqfcOekLJ+X8ZT3VN+wqojz99lXFUDA4IU5WSRYVROmWypZm5LfhulX13+REGgDs0sGNmjqCNcsQ6UW6NFFIK6dh6OmVnuKW1+lSG097xlhv64IDabYM/wf9kH34QLyyZvI0OVVoUnKuiebZMExAZJ8NzxTyM6ol/J8wIHRuSHXwu812AVgUdIDGdswNF2PjapNrb/6TRXZeP6BtizlHWoMlpJp9QaNOqhNPj3uONB7P8EJrS0u23SXunTz+GIKzGP4x/a1hDburtYmoKUrls+rF2eufbTypANSJf5u7niVnXaQn2Mpy07FeeeptyYi2hWgXOrWjtsUy9OLRgR4TKyzKtj3rJX+jRJ/SgNv59VQta83JN0Xw+4qIJPWhYHvgSAdp1EugnSK70PvoLN7T3OX3Ox3HjtKcZR/ClR9w2hpoWbGEMmqeiUug64aYFQ6UyBeKWEUpPT3rd7Cusu57WiSoj7OsXX5vWlUz23Dmz/UqJq91qo7UorjlueIkyMgPpaKfEeF6FM5i/lkBBPlB0rD8l5RaJ7c6EgD/6
ahcyM7bteQIpL/7P8G9VcWD/45D1HqOhc0DXenSJuZBnA50IMLaT77bomcEtEigMCiBjKpTCSjNJ/CL9aJe+1EOQpYL8kEH7ZHrUlQtO8tnOCM3tQ+0d72g0zo35pPTbwgEOdH7BAqs4z/EEdjmEaE15VdmVeDXYUUnl2XSX4TO45G2l5O7wzLwEvYWRx+cez6ro+Hv4f5MkPeQvyqLzKwuwocOG6GD4TzsUl3w/h3Tw+kEkbzPW2UijeygLSYad44jmfwTQCwee/DzPbiGXv8a3Zo1KrT0+RLgKQ4K5/RLqLFfcHZtgKqSIFKbPaNPoBs9YWNkR82Be75bYtyAWuaNj/taw9h06hHBlN9JAlXE46wUjZ8ScG9Lw+pI9SxW+k5sWzrOjCv0rH6wGF2XwEjU7zTXe9njj4zPj+Jgsc1Q01ThYUNfAXG0M1cm9SteVEovgRXT14nv3yqgyOMW2Q/REGqNuyRvrbxjfwfk7ZbvVF6mDR5ayB0qdnH5YlEbDfE/MmbEQ1UQvEkbMZsyrCzNjUoG/DxThsuCARZt1P9OpDSYmcG1LL4TgFfSZIF40QfHeJJjZhwotCwrBSWCkThF/TAHO6MFYaUvX0iofIMzIjdojuf7eTLU2dVaxDLoYWorvKl18T1zo9ESws0Ro453sXTzvQbyGJaDhYQbAhkYwvzX3D1tq4r4iBDqTlJKGsX59z2G1m5K48dIAqpjysknMyDeCz2MyfpKbj1ja0GzuNbtv0X48PMR+6PTMc25zatNU93aDR30fE1BEtjRgUrUuZzMSC0FkAkuqWTuN0mK7kYZZ8Uv3fSa0pOGh/uEyuIZ2+slOobCeqiG9hgmOnjvPAY/DXRTu+sSsRyeICSfLaawja3ZGhpz/fTFKygSY8O8Iolyg1MeyPrIz3eNndkd7RlBifbN+RZD8pNHJhljHnBRvO579Kn5eBey9cih0/DCXrqiJrxz2/rulNezKuLsY3m+l//IqzA38kpR5sbHEDoO+0HZcNTpU7hsc+3yj806eZ0SvJdDLxjiOoebLBLo6JebfOmaBAjplam8GLLuoJfH0DlwJkAEUEQvcx4Y0AbUAL3CmQUHWHiGrlCrWml7nlIyEhLj7Uj32z9lRXxBBrH5obgwl8RWpmCAti7K4ryFSveRMo0A67wR3APYYvF1DoSbIRABn2ikQVvPrcjiXDNwkx \ No newline at end of file diff --git a/src/allmydata/test/eliotutil.py b/src/allmydata/test/eliotutil.py index 1685744fd..dd21f1e9d 100644 --- a/src/allmydata/test/eliotutil.py +++ b/src/allmydata/test/eliotutil.py @@ -42,7 +42,6 @@ from zope.interface import ( from eliot import ( ActionType, Field, - MemoryLogger, ILogger, ) from eliot.testing import ( @@ -54,8 +53,9 @@ from twisted.python.monkey import ( MonkeyPatcher, ) -from ..util.jsonbytes import AnyBytesJSONEncoder - +from ..util.eliotutil import ( + MemoryLogger, +) _NAME = Field.for_types( u"name", @@ -71,14 +71,6 @@ RUN_TEST = ActionType( ) -# On Python 3, we want to use our custom JSON encoder when validating messages -# can be encoded to JSON: -if PY2: - _memory_logger = MemoryLogger -else: - _memory_logger = lambda: MemoryLogger(encoder=AnyBytesJSONEncoder) - - @attr.s class EliotLoggedRunTest(object): """ @@ -170,7 +162,7 @@ def with_logging( """ @wraps(test_method) def run_with_logging(*args, **kwargs): - validating_logger = _memory_logger() + validating_logger = MemoryLogger() original = swap_logger(None) try: swap_logger(_TwoLoggers(original, validating_logger)) diff --git a/src/allmydata/test/mutable/test_checker.py b/src/allmydata/test/mutable/test_checker.py index 11ba776fd..6d9145d68 100644 --- a/src/allmydata/test/mutable/test_checker.py +++ b/src/allmydata/test/mutable/test_checker.py @@ -10,14 +10,15 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase from foolscap.api import flushEventualQueue from allmydata.monitor import Monitor from allmydata.mutable.common import CorruptShareError from .util import PublishMixin, corrupt, CheckerMixin -class Checker(unittest.TestCase, CheckerMixin, PublishMixin): +class Checker(AsyncTestCase, CheckerMixin, PublishMixin): def setUp(self): + super(Checker, self).setUp() return self.publish_one() diff --git a/src/allmydata/test/mutable/test_datahandle.py b/src/allmydata/test/mutable/test_datahandle.py index 1819cba01..7aabcd8e1 100644 --- a/src/allmydata/test/mutable/test_datahandle.py +++ b/src/allmydata/test/mutable/test_datahandle.py @@ -10,11 +10,14 @@ from future.utils 
import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import SyncTestCase from allmydata.mutable.publish import MutableData +from testtools.matchers import Equals, HasLength -class DataHandle(unittest.TestCase): + +class DataHandle(SyncTestCase): def setUp(self): + super(DataHandle, self).setUp() self.test_data = b"Test Data" * 50000 self.uploadable = MutableData(self.test_data) @@ -26,13 +29,13 @@ class DataHandle(unittest.TestCase): data = b"".join(data) start = i end = i + chunk_size - self.failUnlessEqual(data, self.test_data[start:end]) + self.assertThat(data, Equals(self.test_data[start:end])) def test_datahandle_get_size(self): actual_size = len(self.test_data) size = self.uploadable.get_size() - self.failUnlessEqual(size, actual_size) + self.assertThat(size, Equals(actual_size)) def test_datahandle_get_size_out_of_order(self): @@ -40,14 +43,14 @@ class DataHandle(unittest.TestCase): # disturbing the location of the seek pointer. chunk_size = 100 data = self.uploadable.read(chunk_size) - self.failUnlessEqual(b"".join(data), self.test_data[:chunk_size]) + self.assertThat(b"".join(data), Equals(self.test_data[:chunk_size])) # Now get the size. size = self.uploadable.get_size() - self.failUnlessEqual(size, len(self.test_data)) + self.assertThat(self.test_data, HasLength(size)) # Now get more data. We should be right where we left off. more_data = self.uploadable.read(chunk_size) start = chunk_size end = chunk_size * 2 - self.failUnlessEqual(b"".join(more_data), self.test_data[start:end]) + self.assertThat(b"".join(more_data), Equals(self.test_data[start:end])) diff --git a/src/allmydata/test/mutable/test_different_encoding.py b/src/allmydata/test/mutable/test_different_encoding.py index a5165532c..f1796d373 100644 --- a/src/allmydata/test/mutable/test_different_encoding.py +++ b/src/allmydata/test/mutable/test_different_encoding.py @@ -10,11 +10,12 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase from .util import FakeStorage, make_nodemaker -class DifferentEncoding(unittest.TestCase): +class DifferentEncoding(AsyncTestCase): def setUp(self): + super(DifferentEncoding, self).setUp() self._storage = s = FakeStorage() self.nodemaker = make_nodemaker(s) diff --git a/src/allmydata/test/mutable/test_exceptions.py b/src/allmydata/test/mutable/test_exceptions.py index 6a9b2b575..23674d036 100644 --- a/src/allmydata/test/mutable/test_exceptions.py +++ b/src/allmydata/test/mutable/test_exceptions.py @@ -11,12 +11,14 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import SyncTestCase from allmydata.mutable.common import NeedMoreDataError, UncoordinatedWriteError -class Exceptions(unittest.TestCase): + +class Exceptions(SyncTestCase): def test_repr(self): nmde = NeedMoreDataError(100, 50, 100) - self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde)) + self.assertTrue("NeedMoreDataError" in repr(nmde), msg=repr(nmde)) + self.assertTrue("NeedMoreDataError" in repr(nmde), 
msg=repr(nmde)) ucwe = UncoordinatedWriteError() - self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe)) + self.assertTrue("UncoordinatedWriteError" in repr(ucwe), msg=repr(ucwe)) diff --git a/src/allmydata/test/mutable/test_filehandle.py b/src/allmydata/test/mutable/test_filehandle.py index 8db02f3fd..795f60654 100644 --- a/src/allmydata/test/mutable/test_filehandle.py +++ b/src/allmydata/test/mutable/test_filehandle.py @@ -12,11 +12,13 @@ if PY2: import os from io import BytesIO -from twisted.trial import unittest +from ..common import SyncTestCase from allmydata.mutable.publish import MutableFileHandle -class FileHandle(unittest.TestCase): + +class FileHandle(SyncTestCase): def setUp(self): + super(FileHandle, self).setUp() self.test_data = b"Test Data" * 50000 self.sio = BytesIO(self.test_data) self.uploadable = MutableFileHandle(self.sio) diff --git a/src/allmydata/test/mutable/test_filenode.py b/src/allmydata/test/mutable/test_filenode.py index de03afc5a..579734433 100644 --- a/src/allmydata/test/mutable/test_filenode.py +++ b/src/allmydata/test/mutable/test_filenode.py @@ -12,7 +12,14 @@ if PY2: from six.moves import cStringIO as StringIO from twisted.internet import defer, reactor -from twisted.trial import unittest +from ..common import AsyncBrokenTestCase +from testtools.matchers import ( + Equals, + Contains, + HasLength, + Is, + IsInstance, +) from allmydata import uri, client from allmydata.util.consumer import MemoryConsumer from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION, DownloadStopped @@ -29,12 +36,13 @@ from .util import ( make_peer, ) -class Filenode(unittest.TestCase, testutil.ShouldFailMixin): +class Filenode(AsyncBrokenTestCase, testutil.ShouldFailMixin): # this used to be in Publish, but we removed the limit. Some of # these tests test whether the new code correctly allows files # larger than the limit. 
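The hunks above all make the same mechanical conversion: twisted.trial's unittest.TestCase becomes the testtools-backed SyncTestCase/AsyncTestCase imported from ..common, failUnless*-style assertions become assertThat plus a matcher from testtools.matchers, and every converted setUp now chains up with super(). A minimal sketch of that pattern, assuming only stock testtools (plain TestCase stands in here for the project's SyncTestCase helper, which this patch does not show):

    from testtools import TestCase
    from testtools.matchers import Equals, HasLength, IsInstance


    class ExampleMigratedTest(TestCase):
        def setUp(self):
            # testtools-based cases check that setUp was chained up;
            # trial's TestCase was forgiving about skipping this.
            super(ExampleMigratedTest, self).setUp()
            self.test_data = b"Test Data" * 5

        def test_assertion_styles(self):
            # failUnlessEqual(a, b)        -> assertThat(a, Equals(b))
            self.assertThat(self.test_data[:9], Equals(b"Test Data"))
            # failUnlessEqual(len(x), n)   -> assertThat(x, HasLength(n))
            self.assertThat(self.test_data, HasLength(45))
            # failUnless(isinstance(x, T)) -> assertThat(x, IsInstance(T))
            self.assertThat(self.test_data, IsInstance(bytes))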
OLD_MAX_SEGMENT_SIZE = 3500000 def setUp(self): + super(Filenode, self).setUp() self._storage = FakeStorage() self._peers = list( make_peer(self._storage, n) @@ -48,12 +56,12 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def test_create(self): d = self.nodemaker.create_mutable_file() def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) - self.failUnlessEqual(n.get_storage_index(), n._storage_index) + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker peer0 = sorted(sb.get_all_serverids())[0] shnums = self._storage._peers[peer0].keys() - self.failUnlessEqual(len(shnums), 1) + self.assertThat(shnums, HasLength(1)) d.addCallback(_created) return d @@ -61,12 +69,12 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def test_create_mdmf(self): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) - self.failUnlessEqual(n.get_storage_index(), n._storage_index) + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker peer0 = sorted(sb.get_all_serverids())[0] shnums = self._storage._peers[peer0].keys() - self.failUnlessEqual(len(shnums), 1) + self.assertThat(shnums, HasLength(1)) d.addCallback(_created) return d @@ -80,7 +88,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda ignored, v=v: self.nodemaker.create_mutable_file(version=v)) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) self._node = n return n d.addCallback(_created) @@ -89,19 +97,19 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda ignored: self._node.download_best_version()) d.addCallback(lambda contents: - self.failUnlessEqual(contents, b"Contents" * 50000)) + self.assertThat(contents, Equals(b"Contents" * 50000))) return d def test_max_shares(self): self.nodemaker.default_encoding_parameters['n'] = 255 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) - self.failUnlessEqual(n.get_storage_index(), n._storage_index) + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker num_shares = sum([len(self._storage._peers[x].keys()) for x \ in sb.get_all_serverids()]) - self.failUnlessEqual(num_shares, 255) + self.assertThat(num_shares, Equals(255)) self._node = n return n d.addCallback(_created) @@ -113,7 +121,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): self._node.download_best_version()) # ...and check to make sure everything went okay. 
d.addCallback(lambda contents: - self.failUnlessEqual(b"contents" * 50000, contents)) + self.assertThat(b"contents" * 50000, Equals(contents))) return d def test_max_shares_mdmf(self): @@ -121,12 +129,12 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): self.nodemaker.default_encoding_parameters['n'] = 255 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) - self.failUnlessEqual(n.get_storage_index(), n._storage_index) + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker num_shares = sum([len(self._storage._peers[x].keys()) for x \ in sb.get_all_serverids()]) - self.failUnlessEqual(num_shares, 255) + self.assertThat(num_shares, Equals(255)) self._node = n return n d.addCallback(_created) @@ -135,20 +143,20 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda ignored: self._node.download_best_version()) d.addCallback(lambda contents: - self.failUnlessEqual(contents, b"contents" * 50000)) + self.assertThat(contents, Equals(b"contents" * 50000))) return d def test_mdmf_filenode_cap(self): # Test that an MDMF filenode, once created, returns an MDMF URI. d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) cap = n.get_cap() - self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI)) + self.assertThat(cap, IsInstance(uri.WriteableMDMFFileURI)) rcap = n.get_readcap() - self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI)) + self.assertThat(rcap, IsInstance(uri.ReadonlyMDMFFileURI)) vcap = n.get_verify_cap() - self.failUnless(isinstance(vcap, uri.MDMFVerifierURI)) + self.assertThat(vcap, IsInstance(uri.MDMFVerifierURI)) d.addCallback(_created) return d @@ -158,13 +166,13 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): # filenode given an MDMF cap. 
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) s = n.get_uri() - self.failUnless(s.startswith(b"URI:MDMF")) + self.assertTrue(s.startswith(b"URI:MDMF")) n2 = self.nodemaker.create_from_cap(s) - self.failUnless(isinstance(n2, MutableFileNode)) - self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index()) - self.failUnlessEqual(n.get_uri(), n2.get_uri()) + self.assertThat(n2, IsInstance(MutableFileNode)) + self.assertThat(n.get_storage_index(), Equals(n2.get_storage_index())) + self.assertThat(n.get_uri(), Equals(n2.get_uri())) d.addCallback(_created) return d @@ -172,13 +180,13 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def test_create_from_mdmf_readcap(self): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) s = n.get_readonly_uri() n2 = self.nodemaker.create_from_cap(s) - self.failUnless(isinstance(n2, MutableFileNode)) + self.assertThat(n2, IsInstance(MutableFileNode)) # Check that it's a readonly node - self.failUnless(n2.is_readonly()) + self.assertTrue(n2.is_readonly()) d.addCallback(_created) return d @@ -191,10 +199,10 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.uri = n.get_uri() - self.failUnlessEqual(n._protocol_version, MDMF_VERSION) + self.assertThat(n._protocol_version, Equals(MDMF_VERSION)) n2 = self.nodemaker.create_from_cap(self.uri) - self.failUnlessEqual(n2._protocol_version, MDMF_VERSION) + self.assertThat(n2._protocol_version, Equals(MDMF_VERSION)) d.addCallback(_created) return d @@ -203,14 +211,14 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): n = MutableFileNode(None, None, {"k": 3, "n": 10}, None) calls = [] def _callback(*args, **kwargs): - self.failUnlessEqual(args, (4,) ) - self.failUnlessEqual(kwargs, {"foo": 5}) + self.assertThat(args, Equals((4,))) + self.assertThat(kwargs, Equals({"foo": 5})) calls.append(1) return 6 d = n._do_serialized(_callback, 4, foo=5) def _check_callback(res): - self.failUnlessEqual(res, 6) - self.failUnlessEqual(calls, [1]) + self.assertThat(res, Equals(6)) + self.assertThat(calls, Equals([1])) d.addCallback(_check_callback) def _errback(): @@ -227,26 +235,26 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.get_servermap(MODE_READ)) d.addCallback(lambda smap: smap.dump(StringIO())) d.addCallback(lambda sio: - self.failUnless("3-of-10" in sio.getvalue())) + self.assertTrue("3-of-10" in sio.getvalue())) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1"))) - d.addCallback(lambda res: self.failUnlessIdentical(res, None)) + d.addCallback(lambda res: self.assertThat(res, Is(None))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) d.addCallback(lambda res: n.get_size_of_best_version()) d.addCallback(lambda size: - self.failUnlessEqual(size, len(b"contents 1"))) + self.assertThat(size, Equals(len(b"contents 1")))) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2")) + d.addCallback(lambda res: 
self.assertThat(res, Equals(b"contents 2"))) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING)) d.addCallback(lambda smap: n.download_version(smap, smap.best_recoverable_version())) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) # test a file that is large enough to overcome the # mapupdate-to-retrieve data caching (i.e. make the shares larger # than the default readsize, which is 2000 bytes). A 15kB file @@ -254,7 +262,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.overwrite(MutableData(b"large size file" * 1000))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: - self.failUnlessEqual(res, b"large size file" * 1000)) + self.assertThat(res, Equals(b"large size file" * 1000))) return d d.addCallback(_created) return d @@ -268,7 +276,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): n.get_servermap(MODE_READ)) def _then(servermap): dumped = servermap.dump(StringIO()) - self.failUnlessIn("3-of-10", dumped.getvalue()) + self.assertThat(dumped.getvalue(), Contains("3-of-10")) d.addCallback(_then) # Now overwrite the contents with some new contents. We want # to make them big enough to force the file to be uploaded @@ -280,7 +288,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: - self.failUnlessEqual(data, big_contents)) + self.assertThat(data, Equals(big_contents))) # Overwrite the contents again with some new contents. As # before, they need to be big enough to force multiple # segments, so that we make the downloader deal with @@ -292,7 +300,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: - self.failUnlessEqual(data, bigger_contents)) + self.assertThat(data, Equals(bigger_contents))) return d d.addCallback(_created) return d @@ -323,7 +331,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): # Now we'll retrieve it into a pausing consumer. 
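The Filenode conversions keep the existing Deferred-callback structure intact and only swap the assertion made inside each callback. A self-contained sketch of that shape, assuming testtools' Twisted support (AsynchronousDeferredRunTest) as a stand-in for the project's AsyncBrokenTestCase wrapper, which this patch does not define:

    from testtools import TestCase
    from testtools.matchers import Equals
    from testtools.twistedsupport import AsynchronousDeferredRunTest
    from twisted.internet import defer


    class ExampleDeferredChain(TestCase):
        # generous timeout; the default factory timeout is very short
        run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=2.0)

        def test_chain(self):
            d = defer.succeed(b"contents 1")
            # assert on each intermediate result inside the chain
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1")))
            d.addCallback(lambda ign: b"contents 2")
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2")))
            return d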
c = PausingConsumer() d = version.read(c) - d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data))) + d.addCallback(lambda ign: self.assertThat(c.size, Equals(len(data)))) c2 = PausingAndStoppingConsumer() d.addCallback(lambda ign: @@ -360,14 +368,14 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): self.uri = node.get_uri() # also confirm that the cap has no extension fields pieces = self.uri.split(b":") - self.failUnlessEqual(len(pieces), 4) + self.assertThat(pieces, HasLength(4)) return node.overwrite(MutableData(b"contents1" * 100000)) def _then(ignored): node = self.nodemaker.create_from_cap(self.uri) return node.download_best_version() def _downloaded(data): - self.failUnlessEqual(data, b"contents1" * 100000) + self.assertThat(data, Equals(b"contents1" * 100000)) d.addCallback(_created) d.addCallback(_then) d.addCallback(_downloaded) @@ -397,11 +405,11 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d = self.nodemaker.create_mutable_file(upload1) def _created(n): d = n.download_best_version() - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) upload2 = MutableData(b"contents 2") d.addCallback(lambda res: n.overwrite(upload2)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) return d d.addCallback(_created) return d @@ -415,15 +423,15 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def _created(n): d = n.download_best_version() d.addCallback(lambda data: - self.failUnlessEqual(data, initial_contents)) + self.assertThat(data, Equals(initial_contents))) uploadable2 = MutableData(initial_contents + b"foobarbaz") d.addCallback(lambda ignored: n.overwrite(uploadable2)) d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: - self.failUnlessEqual(data, initial_contents + - b"foobarbaz")) + self.assertThat(data, Equals(initial_contents + + b"foobarbaz"))) return d d.addCallback(_created) return d @@ -431,33 +439,33 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def test_create_with_initial_contents_function(self): data = b"initial contents" def _make_contents(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) key = n.get_writekey() - self.failUnless(isinstance(key, bytes), key) - self.failUnlessEqual(len(key), 16) # AES key size + self.assertTrue(isinstance(key, bytes), key) + self.assertThat(key, HasLength(16)) # AES key size return MutableData(data) d = self.nodemaker.create_mutable_file(_make_contents) def _created(n): return n.download_best_version() d.addCallback(_created) - d.addCallback(lambda data2: self.failUnlessEqual(data2, data)) + d.addCallback(lambda data2: self.assertThat(data2, Equals(data))) return d def test_create_mdmf_with_initial_contents_function(self): data = b"initial contents" * 100000 def _make_contents(n): - self.failUnless(isinstance(n, MutableFileNode)) + self.assertThat(n, IsInstance(MutableFileNode)) key = n.get_writekey() - self.failUnless(isinstance(key, bytes), key) - self.failUnlessEqual(len(key), 16) + self.assertTrue(isinstance(key, bytes), key) + self.assertThat(key, HasLength(16)) return MutableData(data) d = self.nodemaker.create_mutable_file(_make_contents, version=MDMF_VERSION) d.addCallback(lambda n: n.download_best_version()) d.addCallback(lambda data2: - 
self.failUnlessEqual(data2, data)) + self.assertThat(data2, Equals(data))) return d @@ -476,7 +484,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d = n.get_servermap(MODE_READ) d.addCallback(lambda servermap: servermap.best_recoverable_version()) d.addCallback(lambda verinfo: - self.failUnlessEqual(verinfo[0], expected_seqnum, which)) + self.assertThat(verinfo[0], Equals(expected_seqnum), which)) return d def test_modify(self): @@ -513,36 +521,36 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def _created(n): d = n.modify(_modifier) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m")) d.addCallback(lambda res: n.modify(_non_modifier)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non")) d.addCallback(lambda res: n.modify(_none_modifier)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none")) d.addCallback(lambda res: self.shouldFail(ValueError, "error_modifier", None, n.modify, _error_modifier)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err")) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big")) d.addCallback(lambda res: n.modify(_ucw_error_modifier)) - d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2)) + d.addCallback(lambda res: self.assertThat(calls, HasLength(2))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, - b"line1line2line3")) + d.addCallback(lambda res: self.assertThat(res, + Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw")) def _reset_ucw_error_modifier(res): @@ -557,10 +565,10 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): # will only be one larger than the previous test, not two (i.e. 4 # instead of 5). 
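The seqnum bookkeeping in the modify() hunks here leans on one behaviour: after a simulated UncoordinatedWriteError the modifier is expected to be re-run once, which is why `calls` is asserted with HasLength(2) rather than 1. Roughly the shape of such a modifier; the real _ucw_error_modifier is defined in code elided from this patch, and the three-argument modifier signature is an assumption, not shown in the hunk:

    from allmydata.mutable.common import UncoordinatedWriteError

    calls = []

    def ucw_error_modifier(old_contents, servermap, first_time):
        # fail the first attempt, succeed on the retry
        calls.append(1)
        if len(calls) == 1:
            raise UncoordinatedWriteError()
        return old_contents + b"line3"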
d.addCallback(lambda res: n.modify(_ucw_error_non_modifier)) - d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2)) + d.addCallback(lambda res: self.assertThat(calls, HasLength(2))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, - b"line1line2line3")) + d.addCallback(lambda res: self.assertThat(res, + Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw")) d.addCallback(lambda res: n.modify(_toobig_modifier)) return d @@ -596,7 +604,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): def _created(n): d = n.modify(_modifier) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m")) d.addCallback(lambda res: @@ -605,7 +613,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): n.modify, _ucw_error_modifier, _backoff_stopper)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"line1line2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop")) def _reset_ucw_error_modifier(res): @@ -615,8 +623,8 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.modify(_ucw_error_modifier, _backoff_pauser)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, - b"line1line2line3")) + d.addCallback(lambda res: self.assertThat(res, + Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause")) d.addCallback(lambda res: @@ -625,8 +633,8 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): n.modify, _always_ucw_error_modifier, giveuper.delay)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, - b"line1line2line3")) + d.addCallback(lambda res: self.assertThat(res, + Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup")) return d @@ -641,23 +649,23 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.get_servermap(MODE_READ)) d.addCallback(lambda smap: smap.dump(StringIO())) d.addCallback(lambda sio: - self.failUnless("3-of-10" in sio.getvalue())) + self.assertTrue("3-of-10" in sio.getvalue())) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1"))) - d.addCallback(lambda res: self.failUnlessIdentical(res, None)) + d.addCallback(lambda res: self.assertThat(res, Is(None))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap)) d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 
3"))) d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING)) d.addCallback(lambda smap: n.download_version(smap, smap.best_recoverable_version())) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 3")) + d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) return d d.addCallback(_created) return d @@ -673,14 +681,14 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin): return n.get_servermap(MODE_READ) d.addCallback(_created) d.addCallback(lambda ignored: - self.failUnlessEqual(self.n.get_size(), 0)) + self.assertThat(self.n.get_size(), Equals(0))) d.addCallback(lambda ignored: self.n.overwrite(MutableData(b"foobarbaz"))) d.addCallback(lambda ignored: - self.failUnlessEqual(self.n.get_size(), 9)) + self.assertThat(self.n.get_size(), Equals(9))) d.addCallback(lambda ignored: self.nodemaker.create_mutable_file(MutableData(b"foobarbaz"))) d.addCallback(_created) d.addCallback(lambda ignored: - self.failUnlessEqual(self.n.get_size(), 9)) + self.assertThat(self.n.get_size(), Equals(9))) return d diff --git a/src/allmydata/test/mutable/test_interoperability.py b/src/allmydata/test/mutable/test_interoperability.py index 5d7414907..496da1d2a 100644 --- a/src/allmydata/test/mutable/test_interoperability.py +++ b/src/allmydata/test/mutable/test_interoperability.py @@ -11,14 +11,15 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import os, base64 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import HasLength from allmydata import uri from allmydata.storage.common import storage_index_to_dir from allmydata.util import fileutil from .. import common_util as testutil from ..no_network import GridTestMixin -class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): +class Interoperability(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): sdmf_old_shares = {} sdmf_old_shares[0] = 
b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[1] = 
b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" @@ -53,7 +54,7 @@ class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixi sharedata) # ...and verify that the shares are there. 
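In the Interoperability hunk just below, the bare `assert len(shares) == 10` also becomes a matcher assertion: a bare assert is silently skipped under python -O and reports nothing useful on failure, while HasLength names the object and the mismatched length. A small stand-alone illustration (a dummy list stands in for self.find_uri_shares(...)):

    from testtools import TestCase
    from testtools.matchers import HasLength


    class ExampleShareCount(TestCase):
        def test_share_count(self):
            shares = [object()] * 10   # stand-in for self.find_uri_shares(...)
            # old style: assert len(shares) == 10
            self.assertThat(shares, HasLength(10))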
shares = self.find_uri_shares(self.sdmf_old_cap) - assert len(shares) == 10 + self.assertThat(shares, HasLength(10)) def test_new_downloader_can_read_old_shares(self): self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares" @@ -62,5 +63,5 @@ class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixi nm = self.g.clients[0].nodemaker n = nm.create_from_cap(self.sdmf_old_cap) d = n.download_best_version() - d.addCallback(self.failUnlessEqual, self.sdmf_old_contents) + d.addCallback(self.assertEqual, self.sdmf_old_contents) return d diff --git a/src/allmydata/test/mutable/test_multiple_encodings.py b/src/allmydata/test/mutable/test_multiple_encodings.py index 12c5be051..2291b60d8 100644 --- a/src/allmydata/test/mutable/test_multiple_encodings.py +++ b/src/allmydata/test/mutable/test_multiple_encodings.py @@ -10,7 +10,8 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import Equals from allmydata.interfaces import SDMF_VERSION from allmydata.monitor import Monitor from foolscap.logging import log @@ -20,8 +21,9 @@ from allmydata.mutable.servermap import ServerMap, ServermapUpdater from ..common_util import DevNullDictionary from .util import FakeStorage, make_nodemaker -class MultipleEncodings(unittest.TestCase): +class MultipleEncodings(AsyncTestCase): def setUp(self): + super(MultipleEncodings, self).setUp() self.CONTENTS = b"New contents go here" self.uploadable = MutableData(self.CONTENTS) self._storage = FakeStorage() @@ -159,6 +161,6 @@ class MultipleEncodings(unittest.TestCase): d.addCallback(lambda res: fn3.download_best_version()) def _retrieved(new_contents): # the current specified behavior is "first version recoverable" - self.failUnlessEqual(new_contents, contents1) + self.assertThat(new_contents, Equals(contents1)) d.addCallback(_retrieved) return d diff --git a/src/allmydata/test/mutable/test_multiple_versions.py b/src/allmydata/test/mutable/test_multiple_versions.py index 460cde4b3..c9b7e71df 100644 --- a/src/allmydata/test/mutable/test_multiple_versions.py +++ b/src/allmydata/test/mutable/test_multiple_versions.py @@ -10,15 +10,17 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import Equals, HasLength from allmydata.monitor import Monitor from allmydata.mutable.common import MODE_CHECK, MODE_READ from .util import PublishMixin, CheckerMixin -class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin): +class MultipleVersions(AsyncTestCase, PublishMixin, CheckerMixin): def setUp(self): + super(MultipleVersions, self).setUp() return self.publish_multiple() def test_multiple_versions(self): @@ -26,7 +28,7 @@ class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin): # should get the latest one self._set_versions(dict([(i,2) for i in (0,2,4,6,8)])) d = self._fn.download_best_version() - d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4])) + d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[4]))) # and the checker should report problems d.addCallback(lambda res: 
self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_multiple_versions") @@ -35,23 +37,23 @@ class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin): d.addCallback(lambda res: self._set_versions(dict([(i,2) for i in range(10)]))) d.addCallback(lambda res: self._fn.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2])) + d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[2]))) # if exactly one share is at version 3, we should still get v2 d.addCallback(lambda res: self._set_versions({0:3})) d.addCallback(lambda res: self._fn.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2])) + d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[2]))) # but the servermap should see the unrecoverable version. This # depends upon the single newer share being queried early. d.addCallback(lambda res: self._fn.get_servermap(MODE_READ)) def _check_smap(smap): - self.failUnlessEqual(len(smap.unrecoverable_versions()), 1) + self.assertThat(smap.unrecoverable_versions(), HasLength(1)) newer = smap.unrecoverable_newer_versions() - self.failUnlessEqual(len(newer), 1) + self.assertThat(newer, HasLength(1)) verinfo, health = list(newer.items())[0] - self.failUnlessEqual(verinfo[0], 4) - self.failUnlessEqual(health, (1,3)) - self.failIf(smap.needs_merge()) + self.assertThat(verinfo[0], Equals(4)) + self.assertThat(health, Equals((1,3))) + self.assertThat(smap.needs_merge(), Equals(False)) d.addCallback(_check_smap) # if we have a mix of two parallel versions (s4a and s4b), we could # recover either @@ -60,13 +62,13 @@ class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin): 1:4,3:4,5:4,7:4,9:4})) d.addCallback(lambda res: self._fn.get_servermap(MODE_READ)) def _check_smap_mixed(smap): - self.failUnlessEqual(len(smap.unrecoverable_versions()), 0) + self.assertThat(smap.unrecoverable_versions(), HasLength(0)) newer = smap.unrecoverable_newer_versions() - self.failUnlessEqual(len(newer), 0) - self.failUnless(smap.needs_merge()) + self.assertThat(newer, HasLength(0)) + self.assertTrue(smap.needs_merge()) d.addCallback(_check_smap_mixed) d.addCallback(lambda res: self._fn.download_best_version()) - d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or + d.addCallback(lambda res: self.assertTrue(res == self.CONTENTS[3] or res == self.CONTENTS[4])) return d @@ -86,12 +88,12 @@ class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin): d = self._fn.modify(_modify) d.addCallback(lambda res: self._fn.download_best_version()) expected = self.CONTENTS[2] + b" modified" - d.addCallback(lambda res: self.failUnlessEqual(res, expected)) + d.addCallback(lambda res: self.assertThat(res, Equals(expected))) # and the servermap should indicate that the outlier was replaced too d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): - self.failUnlessEqual(smap.highest_seqnum(), 5) - self.failUnlessEqual(len(smap.unrecoverable_versions()), 0) - self.failUnlessEqual(len(smap.recoverable_versions()), 1) + self.assertThat(smap.highest_seqnum(), Equals(5)) + self.assertThat(smap.unrecoverable_versions(), HasLength(0)) + self.assertThat(smap.recoverable_versions(), HasLength(1)) d.addCallback(_check_smap) return d diff --git a/src/allmydata/test/mutable/test_problems.py b/src/allmydata/test/mutable/test_problems.py index 86a367596..d3a779905 100644 --- a/src/allmydata/test/mutable/test_problems.py +++ 
b/src/allmydata/test/mutable/test_problems.py @@ -11,7 +11,8 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import os, base64 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import HasLength from twisted.internet import defer from foolscap.logging import log from allmydata import uri @@ -25,7 +26,6 @@ from allmydata.mutable.common import \ NotEnoughServersError from allmydata.mutable.publish import MutableData from allmydata.storage.common import storage_index_to_dir -from ..common import TEST_RSA_KEY_SIZE from ..no_network import GridTestMixin from .. import common_util as testutil from ..common_util import DevNullDictionary @@ -61,7 +61,7 @@ class FirstServerGetsDeleted(object): return (True, {}) return retval -class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): +class Problems(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): def do_publish_surprise(self, version): self.basedir = "mutable/Problems/test_publish_surprise_%s" % version self.set_up_grid() @@ -198,8 +198,8 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): def _overwritten_again(smap): # Make sure that all shares were updated by making sure that # there aren't any other versions in the sharemap. - self.failUnlessEqual(len(smap.recoverable_versions()), 1) - self.failUnlessEqual(len(smap.unrecoverable_versions()), 0) + self.assertThat(smap.recoverable_versions(), HasLength(1)) + self.assertThat(smap.unrecoverable_versions(), HasLength(0)) d.addCallback(_overwritten_again) return d @@ -218,7 +218,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): # use #467 static-server-selection to disable permutation and force # the choice of server for share[0]. 
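In the test_problems hunk below, key_generator.generate() is now called with no key-size argument and the TEST_RSA_KEY_SIZE import disappears. SameKeyGenerator itself is not shown in this patch; purely as a reading aid, a guess at the shape of such a helper, assuming generate() returns a Deferred (the call site attaches addCallback) and the (pubkey, privkey) constructor seen in the hunk:

    from twisted.internet import defer


    class SameKeyGeneratorSketch(object):
        # Illustrative only: hand every caller the same precomputed keypair
        # so the test controls which key (and hence which slot) is used.
        def __init__(self, pubkey, privkey):
            self._keypair = (pubkey, privkey)

        def generate(self):
            # no key-size argument, matching the changed call site below
            return defer.succeed(self._keypair)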
- d = nm.key_generator.generate(TEST_RSA_KEY_SIZE) + d = nm.key_generator.generate() def _got_key(keypair): (pubkey, privkey) = keypair nm.key_generator = SameKeyGenerator(pubkey, privkey) @@ -240,7 +240,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): # that ought to work def _got_node(n): d = n.download_best_version() - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1")) + d.addCallback(lambda res: self.assertEquals(res, b"contents 1")) # now break the second peer def _break_peer1(res): self.g.break_server(self.server1.get_serverid()) @@ -248,7 +248,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # that ought to work too d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2")) + d.addCallback(lambda res: self.assertEquals(res, b"contents 2")) def _explain_error(f): print(f) if f.check(NotEnoughServersError): @@ -280,7 +280,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): d = nm.create_mutable_file(MutableData(b"contents 1")) def _created(n): d = n.download_best_version() - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 1")) + d.addCallback(lambda res: self.assertEquals(res, b"contents 1")) # now break one of the remaining servers def _break_second_server(res): self.g.break_server(peerids[1]) @@ -288,7 +288,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # that ought to work too d.addCallback(lambda res: n.download_best_version()) - d.addCallback(lambda res: self.failUnlessEqual(res, b"contents 2")) + d.addCallback(lambda res: self.assertEquals(res, b"contents 2")) return d d.addCallback(_created) return d @@ -419,7 +419,7 @@ class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): return self._node.download_version(servermap, ver) d.addCallback(_then) d.addCallback(lambda data: - self.failUnlessEqual(data, CONTENTS)) + self.assertEquals(data, CONTENTS)) return d def test_1654(self): diff --git a/src/allmydata/test/mutable/test_repair.py b/src/allmydata/test/mutable/test_repair.py index fb1caa974..deddb8d92 100644 --- a/src/allmydata/test/mutable/test_repair.py +++ b/src/allmydata/test/mutable/test_repair.py @@ -10,7 +10,8 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import Equals, HasLength from allmydata.interfaces import IRepairResults, ICheckAndRepairResults from allmydata.monitor import Monitor from allmydata.mutable.common import MODE_CHECK @@ -19,7 +20,7 @@ from allmydata.mutable.repairer import MustForceRepairError from ..common import ShouldFailMixin from .util import PublishMixin -class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): +class Repair(AsyncTestCase, PublishMixin, ShouldFailMixin): def get_shares(self, s): all_shares = {} # maps (peerid, shnum) to share data @@ -40,8 +41,8 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): d.addCallback(lambda res: self._fn.check(Monitor())) d.addCallback(lambda check_results: self._fn.repair(check_results)) def _check_results(rres): - 
self.failUnless(IRepairResults.providedBy(rres)) - self.failUnless(rres.get_successful()) + self.assertThat(IRepairResults.providedBy(rres), Equals(True)) + self.assertThat(rres.get_successful(), Equals(True)) # TODO: examine results self.copy_shares() @@ -50,11 +51,11 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): new_shares = self.old_shares[1] # TODO: this really shouldn't change anything. When we implement # a "minimal-bandwidth" repairer", change this test to assert: - #self.failUnlessEqual(new_shares, initial_shares) + #self.assertThat(new_shares, Equals(initial_shares)) # all shares should be in the same place as before - self.failUnlessEqual(set(initial_shares.keys()), - set(new_shares.keys())) + self.assertThat(set(initial_shares.keys()), + Equals(set(new_shares.keys()))) # but they should all be at a newer seqnum. The IV will be # different, so the roothash will be too. for key in initial_shares: @@ -70,19 +71,19 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): IV1, k1, N1, segsize1, datalen1, o1) = unpack_header(new_shares[key]) - self.failUnlessEqual(version0, version1) - self.failUnlessEqual(seqnum0+1, seqnum1) - self.failUnlessEqual(k0, k1) - self.failUnlessEqual(N0, N1) - self.failUnlessEqual(segsize0, segsize1) - self.failUnlessEqual(datalen0, datalen1) + self.assertThat(version0, Equals(version1)) + self.assertThat(seqnum0+1, Equals(seqnum1)) + self.assertThat(k0, Equals(k1)) + self.assertThat(N0, Equals(N1)) + self.assertThat(segsize0, Equals(segsize1)) + self.assertThat(datalen0, Equals(datalen1)) d.addCallback(_check_results) return d def failIfSharesChanged(self, ignored=None): old_shares = self.old_shares[-2] current_shares = self.old_shares[-1] - self.failUnlessEqual(old_shares, current_shares) + self.assertThat(old_shares, Equals(current_shares)) def _test_whether_repairable(self, publisher, nshares, expected_result): @@ -96,12 +97,12 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): d.addCallback(_delete_some_shares) d.addCallback(lambda ign: self._fn.check(Monitor())) def _check(cr): - self.failIf(cr.is_healthy()) - self.failUnlessEqual(cr.is_recoverable(), expected_result) + self.assertThat(cr.is_healthy(), Equals(False)) + self.assertThat(cr.is_recoverable(), Equals(expected_result)) return cr d.addCallback(_check) d.addCallback(lambda check_results: self._fn.repair(check_results)) - d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result)) + d.addCallback(lambda crr: self.assertThat(crr.get_successful(), Equals(expected_result))) return d def test_unrepairable_0shares(self): @@ -136,7 +137,7 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): del shares[peerid][shnum] d.addCallback(_delete_some_shares) d.addCallback(lambda ign: self._fn.check_and_repair(Monitor())) - d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result)) + d.addCallback(lambda crr: self.assertThat(crr.get_repair_successful(), Equals(expected_result))) return d def test_unrepairable_0shares_checkandrepair(self): @@ -181,13 +182,13 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): self._fn.repair(check_results, force=True)) # this should give us 10 shares of the highest roothash def _check_repair_results(rres): - self.failUnless(rres.get_successful()) + self.assertThat(rres.get_successful(), Equals(True)) pass # TODO d.addCallback(_check_repair_results) d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): - 
self.failUnlessEqual(len(smap.recoverable_versions()), 1) - self.failIf(smap.unrecoverable_versions()) + self.assertThat(smap.recoverable_versions(), HasLength(1)) + self.assertThat(smap.unrecoverable_versions(), HasLength(0)) # now, which should have won? roothash_s4a = self.get_roothash_for(3) roothash_s4b = self.get_roothash_for(4) @@ -196,9 +197,9 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): else: expected_contents = self.CONTENTS[3] new_versionid = smap.best_recoverable_version() - self.failUnlessEqual(new_versionid[0], 5) # seqnum 5 + self.assertThat(new_versionid[0], Equals(5)) # seqnum 5 d2 = self._fn.download_version(smap, new_versionid) - d2.addCallback(self.failUnlessEqual, expected_contents) + d2.addCallback(self.assertEqual, expected_contents) return d2 d.addCallback(_check_smap) return d @@ -216,19 +217,19 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): d.addCallback(lambda check_results: self._fn.repair(check_results)) # this should give us 10 shares of v3 def _check_repair_results(rres): - self.failUnless(rres.get_successful()) + self.assertThat(rres.get_successful(), Equals(True)) pass # TODO d.addCallback(_check_repair_results) d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): - self.failUnlessEqual(len(smap.recoverable_versions()), 1) - self.failIf(smap.unrecoverable_versions()) + self.assertThat(smap.recoverable_versions(), HasLength(1)) + self.assertThat(smap.unrecoverable_versions(), HasLength(0)) # now, which should have won? expected_contents = self.CONTENTS[3] new_versionid = smap.best_recoverable_version() - self.failUnlessEqual(new_versionid[0], 5) # seqnum 5 + self.assertThat(new_versionid[0], Equals(5)) # seqnum 5 d2 = self._fn.download_version(smap, new_versionid) - d2.addCallback(self.failUnlessEqual, expected_contents) + d2.addCallback(self.assertEquals, expected_contents) return d2 d.addCallback(_check_smap) return d @@ -256,12 +257,12 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): d.addCallback(_get_readcap) d.addCallback(lambda res: self._fn3.check_and_repair(Monitor())) def _check_results(crr): - self.failUnless(ICheckAndRepairResults.providedBy(crr)) + self.assertThat(ICheckAndRepairResults.providedBy(crr), Equals(True)) # we should detect the unhealthy, but skip over mutable-readcap # repairs until #625 is fixed - self.failIf(crr.get_pre_repair_results().is_healthy()) - self.failIf(crr.get_repair_attempted()) - self.failIf(crr.get_post_repair_results().is_healthy()) + self.assertThat(crr.get_pre_repair_results().is_healthy(), Equals(False)) + self.assertThat(crr.get_repair_attempted(), Equals(False)) + self.assertThat(crr.get_post_repair_results().is_healthy(), Equals(False)) d.addCallback(_check_results) return d @@ -281,6 +282,6 @@ class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin): d.addCallback(lambda ign: self._fn2.check(Monitor())) d.addCallback(lambda check_results: self._fn2.repair(check_results)) def _check(crr): - self.failUnlessEqual(crr.get_successful(), True) + self.assertThat(crr.get_successful(), Equals(True)) d.addCallback(_check) return d diff --git a/src/allmydata/test/mutable/test_roundtrip.py b/src/allmydata/test/mutable/test_roundtrip.py index 79292b000..96ecdf640 100644 --- a/src/allmydata/test/mutable/test_roundtrip.py +++ b/src/allmydata/test/mutable/test_roundtrip.py @@ -11,7 +11,8 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, 
object, range, str, max, min # noqa: F401 from six.moves import cStringIO as StringIO -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import Equals, HasLength, Contains from twisted.internet import defer from allmydata.util import base32, consumer @@ -23,8 +24,9 @@ from allmydata.mutable.retrieve import Retrieve from .util import PublishMixin, make_storagebroker, corrupt from .. import common_util as testutil -class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): +class Roundtrip(AsyncTestCase, testutil.ShouldFailMixin, PublishMixin): def setUp(self): + super(Roundtrip, self).setUp() return self.publish_one() def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None): @@ -73,11 +75,11 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): def _do_retrieve(servermap): self._smap = servermap #self.dump_servermap(servermap) - self.failUnlessEqual(len(servermap.recoverable_versions()), 1) + self.assertThat(servermap.recoverable_versions(), HasLength(1)) return self.do_download(servermap) d.addCallback(_do_retrieve) def _retrieved(new_contents): - self.failUnlessEqual(new_contents, self.CONTENTS) + self.assertThat(new_contents, Equals(self.CONTENTS)) d.addCallback(_retrieved) # we should be able to re-use the same servermap, both with and # without updating it. @@ -132,10 +134,10 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): # back empty d = self.make_servermap(sb=sb2) def _check_servermap(servermap): - self.failUnlessEqual(servermap.best_recoverable_version(), None) - self.failIf(servermap.recoverable_versions()) - self.failIf(servermap.unrecoverable_versions()) - self.failIf(servermap.all_servers()) + self.assertThat(servermap.best_recoverable_version(), Equals(None)) + self.assertFalse(servermap.recoverable_versions()) + self.assertFalse(servermap.unrecoverable_versions()) + self.assertFalse(servermap.all_servers()) d.addCallback(_check_servermap) return d @@ -154,7 +156,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): self._fn._storage_broker = self._storage_broker return self._fn.download_best_version() def _retrieved(new_contents): - self.failUnlessEqual(new_contents, self.CONTENTS) + self.assertThat(new_contents, Equals(self.CONTENTS)) d.addCallback(_restore) d.addCallback(_retrieved) return d @@ -178,13 +180,13 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): # should be noted in the servermap's list of problems. 
if substring: allproblems = [str(f) for f in servermap.get_problems()] - self.failUnlessIn(substring, "".join(allproblems)) + self.assertThat("".join(allproblems), Contains(substring)) return servermap if should_succeed: d1 = self._fn.download_version(servermap, ver, fetch_privkey) d1.addCallback(lambda new_contents: - self.failUnlessEqual(new_contents, self.CONTENTS)) + self.assertThat(new_contents, Equals(self.CONTENTS))) else: d1 = self.shouldFail(NotEnoughSharesError, "_corrupt_all(offset=%s)" % (offset,), @@ -207,7 +209,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): # and the dump should mention the problems s = StringIO() dump = servermap.dump(s).getvalue() - self.failUnless("30 PROBLEMS" in dump, dump) + self.assertTrue("30 PROBLEMS" in dump, msg=dump) d.addCallback(_check_servermap) return d @@ -299,8 +301,8 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): # in NotEnoughSharesError, since each share will look invalid def _check(res): f = res[0] - self.failUnless(f.check(NotEnoughSharesError)) - self.failUnless("uncoordinated write" in str(f)) + self.assertThat(f.check(NotEnoughSharesError), HasLength(1)) + self.assertThat("uncoordinated write" in str(f), Equals(True)) return self._test_corrupt_all(1, "ran out of servers", corrupt_early=False, failure_checker=_check) @@ -309,7 +311,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): def test_corrupt_all_block_late(self): def _check(res): f = res[0] - self.failUnless(f.check(NotEnoughSharesError)) + self.assertTrue(f.check(NotEnoughSharesError)) return self._test_corrupt_all("share_data", "block hash tree failure", corrupt_early=False, failure_checker=_check) @@ -330,9 +332,9 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): shnums_to_corrupt=list(range(0, N-k))) d.addCallback(lambda res: self.make_servermap()) def _do_retrieve(servermap): - self.failUnless(servermap.get_problems()) - self.failUnless("pubkey doesn't match fingerprint" - in str(servermap.get_problems()[0])) + self.assertTrue(servermap.get_problems()) + self.assertThat("pubkey doesn't match fingerprint" + in str(servermap.get_problems()[0]), Equals(True)) ver = servermap.best_recoverable_version() r = Retrieve(self._fn, self._storage_broker, servermap, ver) c = consumer.MemoryConsumer() @@ -340,7 +342,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): d.addCallback(_do_retrieve) d.addCallback(lambda mc: b"".join(mc.chunks)) d.addCallback(lambda new_contents: - self.failUnlessEqual(new_contents, self.CONTENTS)) + self.assertThat(new_contents, Equals(self.CONTENTS))) return d @@ -355,11 +357,11 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin): self.make_servermap()) def _do_retrieve(servermap): ver = servermap.best_recoverable_version() - self.failUnless(ver) + self.assertTrue(ver) return self._fn.download_best_version() d.addCallback(_do_retrieve) d.addCallback(lambda new_contents: - self.failUnlessEqual(new_contents, self.CONTENTS)) + self.assertThat(new_contents, Equals(self.CONTENTS))) return d diff --git a/src/allmydata/test/mutable/test_servermap.py b/src/allmydata/test/mutable/test_servermap.py index e8f933977..505d31e73 100644 --- a/src/allmydata/test/mutable/test_servermap.py +++ b/src/allmydata/test/mutable/test_servermap.py @@ -11,7 +11,8 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, 
bytes, dict, list, object, range, str, max, min # noqa: F401 -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import Equals, NotEquals, HasLength from twisted.internet import defer from allmydata.monitor import Monitor from allmydata.mutable.common import \ @@ -20,8 +21,9 @@ from allmydata.mutable.publish import MutableData from allmydata.mutable.servermap import ServerMap, ServermapUpdater from .util import PublishMixin -class Servermap(unittest.TestCase, PublishMixin): +class Servermap(AsyncTestCase, PublishMixin): def setUp(self): + super(Servermap, self).setUp() return self.publish_one() def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None, @@ -42,17 +44,17 @@ class Servermap(unittest.TestCase, PublishMixin): return d def failUnlessOneRecoverable(self, sm, num_shares): - self.failUnlessEqual(len(sm.recoverable_versions()), 1) - self.failUnlessEqual(len(sm.unrecoverable_versions()), 0) + self.assertThat(sm.recoverable_versions(), HasLength(1)) + self.assertThat(sm.unrecoverable_versions(), HasLength(0)) best = sm.best_recoverable_version() - self.failIfEqual(best, None) - self.failUnlessEqual(sm.recoverable_versions(), set([best])) - self.failUnlessEqual(len(sm.shares_available()), 1) - self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10)) + self.assertThat(best, NotEquals(None)) + self.assertThat(sm.recoverable_versions(), Equals(set([best]))) + self.assertThat(sm.shares_available(), HasLength(1)) + self.assertThat(sm.shares_available()[best], Equals((num_shares, 3, 10))) shnum, servers = list(sm.make_sharemap().items())[0] server = list(servers)[0] - self.failUnlessEqual(sm.version_on_server(server, shnum), best) - self.failUnlessEqual(sm.version_on_server(server, 666), None) + self.assertThat(sm.version_on_server(server, shnum), Equals(best)) + self.assertThat(sm.version_on_server(server, 666), Equals(None)) return sm def test_basic(self): @@ -117,7 +119,7 @@ class Servermap(unittest.TestCase, PublishMixin): v = sm.best_recoverable_version() vm = sm.make_versionmap() shares = list(vm[v]) - self.failUnlessEqual(len(shares), 6) + self.assertThat(shares, HasLength(6)) self._corrupted = set() # mark the first 5 shares as corrupt, then update the servermap. 
# The map should not have the marked shares it in any more, and @@ -135,18 +137,17 @@ class Servermap(unittest.TestCase, PublishMixin): shares = list(vm[v]) for (server, shnum) in self._corrupted: server_shares = sm.debug_shares_on_server(server) - self.failIf(shnum in server_shares, - "%d was in %s" % (shnum, server_shares)) - self.failUnlessEqual(len(shares), 5) + self.assertFalse(shnum in server_shares, "%d was in %s" % (shnum, server_shares)) + self.assertThat(shares, HasLength(5)) d.addCallback(_check_map) return d def failUnlessNoneRecoverable(self, sm): - self.failUnlessEqual(len(sm.recoverable_versions()), 0) - self.failUnlessEqual(len(sm.unrecoverable_versions()), 0) + self.assertThat(sm.recoverable_versions(), HasLength(0)) + self.assertThat(sm.unrecoverable_versions(), HasLength(0)) best = sm.best_recoverable_version() - self.failUnlessEqual(best, None) - self.failUnlessEqual(len(sm.shares_available()), 0) + self.assertThat(best, Equals(None)) + self.assertThat(sm.shares_available(), HasLength(0)) def test_no_shares(self): self._storage._peers = {} # delete all shares @@ -168,12 +169,12 @@ class Servermap(unittest.TestCase, PublishMixin): return d def failUnlessNotQuiteEnough(self, sm): - self.failUnlessEqual(len(sm.recoverable_versions()), 0) - self.failUnlessEqual(len(sm.unrecoverable_versions()), 1) + self.assertThat(sm.recoverable_versions(), HasLength(0)) + self.assertThat(sm.unrecoverable_versions(), HasLength(1)) best = sm.best_recoverable_version() - self.failUnlessEqual(best, None) - self.failUnlessEqual(len(sm.shares_available()), 1) - self.failUnlessEqual(list(sm.shares_available().values())[0], (2,3,10) ) + self.assertThat(best, Equals(None)) + self.assertThat(sm.shares_available(), HasLength(1)) + self.assertThat(list(sm.shares_available().values())[0], Equals((2,3,10))) return sm def test_not_quite_enough_shares(self): @@ -193,7 +194,7 @@ class Servermap(unittest.TestCase, PublishMixin): d.addCallback(lambda res: ms(mode=MODE_CHECK)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) d.addCallback(lambda sm: - self.failUnlessEqual(len(sm.make_sharemap()), 2)) + self.assertThat(sm.make_sharemap(), HasLength(2))) d.addCallback(lambda res: ms(mode=MODE_ANYTHING)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) d.addCallback(lambda res: ms(mode=MODE_WRITE)) @@ -216,7 +217,7 @@ class Servermap(unittest.TestCase, PublishMixin): # Calling make_servermap also updates the servermap in the mode # that we specify, so we just need to see what it says. 
def _check_servermap(sm): - self.failUnlessEqual(len(sm.recoverable_versions()), 1) + self.assertThat(sm.recoverable_versions(), HasLength(1)) d.addCallback(_check_servermap) return d @@ -229,10 +230,10 @@ class Servermap(unittest.TestCase, PublishMixin): self.make_servermap(mode=MODE_WRITE, update_range=(1, 2))) def _check_servermap(sm): # 10 shares - self.failUnlessEqual(len(sm.update_data), 10) + self.assertThat(sm.update_data, HasLength(10)) # one version for data in sm.update_data.values(): - self.failUnlessEqual(len(data), 1) + self.assertThat(data, HasLength(1)) d.addCallback(_check_servermap) return d @@ -244,5 +245,5 @@ class Servermap(unittest.TestCase, PublishMixin): d.addCallback(lambda ignored: self.make_servermap(mode=MODE_CHECK)) d.addCallback(lambda servermap: - self.failUnlessEqual(len(servermap.recoverable_versions()), 1)) + self.assertThat(servermap.recoverable_versions(), HasLength(1))) return d diff --git a/src/allmydata/test/mutable/test_update.py b/src/allmydata/test/mutable/test_update.py index da5d53e4c..c3ba1e9f7 100644 --- a/src/allmydata/test/mutable/test_update.py +++ b/src/allmydata/test/mutable/test_update.py @@ -11,7 +11,12 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import re -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import ( + Equals, + IsInstance, + GreaterThan, +) from twisted.internet import defer from allmydata.interfaces import MDMF_VERSION from allmydata.mutable.filenode import MutableFileNode @@ -25,7 +30,7 @@ from .. import common_util as testutil # this up. SEGSIZE = 128*1024 -class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): +class Update(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): def setUp(self): GridTestMixin.setUp(self) self.basedir = self.mktemp() @@ -35,14 +40,14 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): # self.data should be at least three segments long. 
td = b"testdata " self.data = td*(int(3*SEGSIZE//len(td))+10) # currently about 400kB - assert len(self.data) > 3*SEGSIZE + self.assertThat(len(self.data), GreaterThan(3*SEGSIZE)) self.small_data = b"test data" * 10 # 90 B; SDMF def do_upload_sdmf(self): d = self.nm.create_mutable_file(MutableData(self.small_data)) def _then(n): - assert isinstance(n, MutableFileNode) + self.assertThat(n, IsInstance(MutableFileNode)) self.sdmf_node = n d.addCallback(_then) return d @@ -51,7 +56,7 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): d = self.nm.create_mutable_file(MutableData(self.data), version=MDMF_VERSION) def _then(n): - assert isinstance(n, MutableFileNode) + self.assertThat(n, IsInstance(MutableFileNode)) self.mdmf_node = n d.addCallback(_then) return d @@ -185,7 +190,7 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): len(self.data))) d.addCallback(lambda ign: self.mdmf_node.download_best_version()) d.addCallback(lambda results: - self.failUnlessEqual(results, new_data)) + self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 @@ -201,7 +206,7 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): len(self.small_data))) d.addCallback(lambda ign: self.sdmf_node.download_best_version()) d.addCallback(lambda results: - self.failUnlessEqual(results, new_data)) + self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 @@ -221,7 +226,7 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): replace_offset)) d.addCallback(lambda ign: self.mdmf_node.download_best_version()) d.addCallback(lambda results: - self.failUnlessEqual(results, new_data)) + self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 @@ -242,7 +247,7 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin): replace_offset)) d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(lambda results: - self.failUnlessEqual(results, new_data)) + self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 diff --git a/src/allmydata/test/mutable/test_version.py b/src/allmydata/test/mutable/test_version.py index 042305c24..d5c44f204 100644 --- a/src/allmydata/test/mutable/test_version.py +++ b/src/allmydata/test/mutable/test_version.py @@ -14,7 +14,13 @@ import os from six.moves import cStringIO as StringIO from twisted.internet import defer -from twisted.trial import unittest +from ..common import AsyncTestCase +from testtools.matchers import ( + Equals, + IsInstance, + HasLength, + Contains, +) from allmydata import uri from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION @@ -29,7 +35,7 @@ from ..no_network import GridTestMixin from .util import PublishMixin from .. 
import common_util as testutil -class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ +class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \ PublishMixin): def setUp(self): GridTestMixin.setUp(self) @@ -47,8 +53,8 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d = self.nm.create_mutable_file(MutableData(data), version=MDMF_VERSION) def _then(n): - assert isinstance(n, MutableFileNode) - assert n._protocol_version == MDMF_VERSION + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n._protocol_version, Equals(MDMF_VERSION)) self.mdmf_node = n return n d.addCallback(_then) @@ -59,8 +65,8 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ data = self.small_data d = self.nm.create_mutable_file(MutableData(data)) def _then(n): - assert isinstance(n, MutableFileNode) - assert n._protocol_version == SDMF_VERSION + self.assertThat(n, IsInstance(MutableFileNode)) + self.assertThat(n._protocol_version, Equals(SDMF_VERSION)) self.sdmf_node = n return n d.addCallback(_then) @@ -69,9 +75,9 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ def do_upload_empty_sdmf(self): d = self.nm.create_mutable_file(MutableData(b"")) def _then(n): - assert isinstance(n, MutableFileNode) + self.assertThat(n, IsInstance(MutableFileNode)) self.sdmf_zero_length_node = n - assert n._protocol_version == SDMF_VERSION + self.assertThat(n._protocol_version, Equals(SDMF_VERSION)) return n d.addCallback(_then) return d @@ -95,7 +101,7 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ debug.find_shares(fso) sharefiles = fso.stdout.getvalue().splitlines() expected = self.nm.default_encoding_parameters["n"] - self.failUnlessEqual(len(sharefiles), expected) + self.assertThat(sharefiles, HasLength(expected)) do = debug.DumpOptions() do["filename"] = sharefiles[0] @@ -103,17 +109,17 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ debug.dump_share(do) output = do.stdout.getvalue() lines = set(output.splitlines()) - self.failUnless("Mutable slot found:" in lines, output) - self.failUnless(" share_type: MDMF" in lines, output) - self.failUnless(" num_extra_leases: 0" in lines, output) - self.failUnless(" MDMF contents:" in lines, output) - self.failUnless(" seqnum: 1" in lines, output) - self.failUnless(" required_shares: 3" in lines, output) - self.failUnless(" total_shares: 10" in lines, output) - self.failUnless(" segsize: 131073" in lines, output) - self.failUnless(" datalen: %d" % len(self.data) in lines, output) + self.assertTrue("Mutable slot found:" in lines, output) + self.assertTrue(" share_type: MDMF" in lines, output) + self.assertTrue(" num_extra_leases: 0" in lines, output) + self.assertTrue(" MDMF contents:" in lines, output) + self.assertTrue(" seqnum: 1" in lines, output) + self.assertTrue(" required_shares: 3" in lines, output) + self.assertTrue(" total_shares: 10" in lines, output) + self.assertTrue(" segsize: 131073" in lines, output) + self.assertTrue(" datalen: %d" % len(self.data) in lines, output) vcap = str(n.get_verify_cap().to_string(), "utf-8") - self.failUnless(" verify-cap: %s" % vcap in lines, output) + self.assertTrue(" verify-cap: %s" % vcap in lines, output) cso = debug.CatalogSharesOptions() cso.nodedirs = fso.nodedirs cso.stdout = StringIO() @@ -122,13 +128,13 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ shares = cso.stdout.getvalue().splitlines() oneshare = shares[0] 
# all shares should be MDMF self.failIf(oneshare.startswith("UNKNOWN"), oneshare) - self.failUnless(oneshare.startswith("MDMF"), oneshare) + self.assertTrue(oneshare.startswith("MDMF"), oneshare) fields = oneshare.split() - self.failUnlessEqual(fields[0], "MDMF") - self.failUnlessEqual(fields[1].encode("ascii"), storage_index) - self.failUnlessEqual(fields[2], "3/10") - self.failUnlessEqual(fields[3], "%d" % len(self.data)) - self.failUnless(fields[4].startswith("#1:"), fields[3]) + self.assertThat(fields[0], Equals("MDMF")) + self.assertThat(fields[1].encode("ascii"), Equals(storage_index)) + self.assertThat(fields[2], Equals("3/10")) + self.assertThat(fields[3], Equals("%d" % len(self.data))) + self.assertTrue(fields[4].startswith("#1:"), fields[3]) # the rest of fields[4] is the roothash, which depends upon # encryption salts and is not constant. fields[5] is the # remaining time on the longest lease, which is timing dependent. @@ -140,11 +146,11 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d = self.do_upload() d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version()) d.addCallback(lambda bv: - self.failUnlessEqual(bv.get_sequence_number(), 1)) + self.assertThat(bv.get_sequence_number(), Equals(1))) d.addCallback(lambda ignored: self.sdmf_node.get_best_readable_version()) d.addCallback(lambda bv: - self.failUnlessEqual(bv.get_sequence_number(), 1)) + self.assertThat(bv.get_sequence_number(), Equals(1))) # Now update. The sequence number in both cases should be 1 in # both cases. def _do_update(ignored): @@ -158,11 +164,11 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ignored: self.mdmf_node.get_best_readable_version()) d.addCallback(lambda bv: - self.failUnlessEqual(bv.get_sequence_number(), 2)) + self.assertThat(bv.get_sequence_number(), Equals(2))) d.addCallback(lambda ignored: self.sdmf_node.get_best_readable_version()) d.addCallback(lambda bv: - self.failUnlessEqual(bv.get_sequence_number(), 2)) + self.assertThat(bv.get_sequence_number(), Equals(2))) return d @@ -175,10 +181,10 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ def _then(ign): mdmf_uri = self.mdmf_node.get_uri() cap = uri.from_string(mdmf_uri) - self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI)) + self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI)) readonly_mdmf_uri = self.mdmf_node.get_readonly_uri() cap = uri.from_string(readonly_mdmf_uri) - self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI)) + self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI)) d.addCallback(_then) return d @@ -189,16 +195,16 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) def _check_mdmf(bv): n = self.mdmf_node - self.failUnlessEqual(bv.get_writekey(), n.get_writekey()) - self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index()) - self.failIf(bv.is_readonly()) + self.assertThat(bv.get_writekey(), Equals(n.get_writekey())) + self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index())) + self.assertFalse(bv.is_readonly()) d.addCallback(_check_mdmf) d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version()) def _check_sdmf(bv): n = self.sdmf_node - self.failUnlessEqual(bv.get_writekey(), n.get_writekey()) - self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index()) - self.failIf(bv.is_readonly()) + self.assertThat(bv.get_writekey(), Equals(n.get_writekey())) + 
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index())) + self.assertFalse(bv.is_readonly()) d.addCallback(_check_sdmf) return d @@ -206,21 +212,21 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ def test_get_readonly_version(self): d = self.do_upload() d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version()) - d.addCallback(lambda bv: self.failUnless(bv.is_readonly())) + d.addCallback(lambda bv: self.assertTrue(bv.is_readonly())) # Attempting to get a mutable version of a mutable file from a # filenode initialized with a readcap should return a readonly # version of that same node. d.addCallback(lambda ign: self.mdmf_node.get_readonly()) d.addCallback(lambda ro: ro.get_best_mutable_version()) - d.addCallback(lambda v: self.failUnless(v.is_readonly())) + d.addCallback(lambda v: self.assertTrue(v.is_readonly())) d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version()) - d.addCallback(lambda bv: self.failUnless(bv.is_readonly())) + d.addCallback(lambda bv: self.assertTrue(bv.is_readonly())) d.addCallback(lambda ign: self.sdmf_node.get_readonly()) d.addCallback(lambda ro: ro.get_best_mutable_version()) - d.addCallback(lambda v: self.failUnless(v.is_readonly())) + d.addCallback(lambda v: self.assertTrue(v.is_readonly())) return d @@ -232,13 +238,13 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessEqual(data, b"foo bar baz" * 100000)) + self.assertThat(data, Equals(b"foo bar baz" * 100000))) d.addCallback(lambda ignored: self.sdmf_node.overwrite(new_small_data)) d.addCallback(lambda ignored: self.sdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessEqual(data, b"foo bar baz" * 10)) + self.assertThat(data, Equals(b"foo bar baz" * 10))) return d @@ -250,13 +256,13 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessIn(b"modified", data)) + self.assertThat(data, Contains(b"modified"))) d.addCallback(lambda ignored: self.sdmf_node.modify(modifier)) d.addCallback(lambda ignored: self.sdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessIn(b"modified", data)) + self.assertThat(data, Contains(b"modified"))) return d @@ -271,13 +277,13 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessIn(b"modified", data)) + self.assertThat(data, Contains(b"modified"))) d.addCallback(lambda ignored: self.sdmf_node.modify(modifier)) d.addCallback(lambda ignored: self.sdmf_node.download_best_version()) d.addCallback(lambda data: - self.failUnlessIn(b"modified", data)) + self.assertThat(data, Contains(b"modified"))) return d @@ -308,13 +314,13 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d.addCallback(lambda ignored: self._fn.download_version(self.servermap, self.version1)) d.addCallback(lambda results: - self.failUnlessEqual(self.CONTENTS[self.version1_index], - results)) + self.assertThat(self.CONTENTS[self.version1_index], + Equals(results))) d.addCallback(lambda ignored: self._fn.download_version(self.servermap, self.version2)) d.addCallback(lambda results: - self.failUnlessEqual(self.CONTENTS[self.version2_index], - results)) + 
self.assertThat(self.CONTENTS[self.version2_index], + Equals(results))) return d @@ -344,7 +350,7 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ for i in range(0, len(expected), step): d2.addCallback(lambda ignored, i=i: version.read(c, i, step)) d2.addCallback(lambda ignored: - self.failUnlessEqual(expected, b"".join(c.chunks))) + self.assertThat(expected, Equals(b"".join(c.chunks)))) return d2 d.addCallback(_read_data) return d @@ -447,16 +453,16 @@ class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \ d2 = defer.succeed(None) d2.addCallback(lambda ignored: version.read(c)) d2.addCallback(lambda ignored: - self.failUnlessEqual(expected, b"".join(c.chunks))) + self.assertThat(expected, Equals(b"".join(c.chunks)))) d2.addCallback(lambda ignored: version.read(c2, offset=0, size=len(expected))) d2.addCallback(lambda ignored: - self.failUnlessEqual(expected, b"".join(c2.chunks))) + self.assertThat(expected, Equals(b"".join(c2.chunks)))) return d2 d.addCallback(_read_data) d.addCallback(lambda ignored: node.download_best_version()) - d.addCallback(lambda data: self.failUnlessEqual(expected, data)) + d.addCallback(lambda data: self.assertThat(expected, Equals(data))) return d def test_read_and_download_mdmf(self): diff --git a/src/allmydata/test/mutable/util.py b/src/allmydata/test/mutable/util.py index dac61a6e3..bed350652 100644 --- a/src/allmydata/test/mutable/util.py +++ b/src/allmydata/test/mutable/util.py @@ -25,7 +25,6 @@ from allmydata.storage_client import StorageFarmBroker from allmydata.mutable.layout import MDMFSlotReadProxy from allmydata.mutable.publish import MutableData from ..common import ( - TEST_RSA_KEY_SIZE, EMPTY_CLIENT_CONFIG, ) @@ -287,7 +286,7 @@ def make_storagebroker_with_peers(peers): return storage_broker -def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE): +def make_nodemaker(s=None, num_peers=10): """ Make a ``NodeMaker`` connected to some number of fake storage servers. @@ -298,20 +297,20 @@ def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE): the node maker. """ storage_broker = make_storagebroker(s, num_peers) - return make_nodemaker_with_storage_broker(storage_broker, keysize) + return make_nodemaker_with_storage_broker(storage_broker) -def make_nodemaker_with_peers(peers, keysize=TEST_RSA_KEY_SIZE): +def make_nodemaker_with_peers(peers): """ Make a ``NodeMaker`` connected to the given storage servers. :param list peers: The storage servers to associate with the node maker. """ storage_broker = make_storagebroker_with_peers(peers) - return make_nodemaker_with_storage_broker(storage_broker, keysize) + return make_nodemaker_with_storage_broker(storage_broker) -def make_nodemaker_with_storage_broker(storage_broker, keysize): +def make_nodemaker_with_storage_broker(storage_broker): """ Make a ``NodeMaker`` using the given storage broker. 
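The hunks above all follow the same pattern: the mutable test classes drop `twisted.trial.unittest.TestCase` in favour of the project's testtools-based `AsyncTestCase`, and `failUnless*`/`failIf*` assertions become `assertThat` plus matchers. Below is a minimal, self-contained sketch of that assertion style, assuming only that testtools is installed; the example class and data are hypothetical, with plain `testtools.TestCase` standing in for `AsyncTestCase`.

```python
# Sketch of the matcher-based assertion style used throughout the hunks above.
# Plain testtools.TestCase stands in for the project's AsyncTestCase wrapper;
# the sample data is invented for illustration.
from testtools import TestCase
from testtools.matchers import Contains, Equals, HasLength, NotEquals

class MatcherStyleExample(TestCase):
    def test_matcher_equivalents(self):
        recoverable = {("seqnum5", "roothash")}   # stand-in for smap.recoverable_versions()
        problems = "pubkey doesn't match fingerprint"

        # was: self.failUnlessEqual(len(recoverable), 1)
        self.assertThat(recoverable, HasLength(1))
        # was: self.failIfEqual(best, None)
        self.assertThat(next(iter(recoverable)), NotEquals(None))
        # was: self.failUnlessIn("pubkey doesn't match fingerprint", allproblems)
        self.assertThat(problems, Contains("pubkey"))
        # was: self.failUnlessEqual(new_contents, expected)
        self.assertThat(b"contents 2", Equals(b"contents 2"))
```

One practical difference: `assertThat` failures report the matcher's mismatch description, so they describe the mismatched value itself rather than just the boolean result of a pre-computed comparison such as `len(...) == 1`.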
@@ -319,8 +318,6 @@ def make_nodemaker_with_storage_broker(storage_broker, keysize): """ sh = client.SecretHolder(b"lease secret", b"convergence secret") keygen = client.KeyGenerator() - if keysize: - keygen.set_default_keysize(keysize) nodemaker = NodeMaker(storage_broker, sh, None, None, None, {"k": 3, "n": 10}, SDMF_VERSION, keygen) diff --git a/src/allmydata/test/no_network.py b/src/allmydata/test/no_network.py index 2f75f9274..ed742e624 100644 --- a/src/allmydata/test/no_network.py +++ b/src/allmydata/test/no_network.py @@ -17,8 +17,7 @@ from __future__ import unicode_literals # This should be useful for tests which want to examine and/or manipulate the # uploaded shares, checker/verifier/repairer tests, etc. The clients have no -# Tubs, so it is not useful for tests that involve a Helper or the -# control.furl . +# Tubs, so it is not useful for tests that involve a Helper. from future.utils import PY2 if PY2: @@ -26,6 +25,11 @@ if PY2: from past.builtins import unicode from six import ensure_text +try: + from typing import Dict, Callable +except ImportError: + pass + import os from base64 import b32encode from functools import ( @@ -46,7 +50,9 @@ from allmydata.util.assertutil import _assert from allmydata import uri as tahoe_uri from allmydata.client import _Client -from allmydata.storage.server import StorageServer, storage_index_to_dir +from allmydata.storage.server import ( + StorageServer, storage_index_to_dir, FoolscapStorageServer, +) from allmydata.util import fileutil, idlib, hashutil from allmydata.util.hashutil import permute_server_hash from allmydata.util.fileutil import abspath_expanduser_unicode @@ -55,7 +61,6 @@ from allmydata.storage_client import ( _StorageServer, ) from .common import ( - TEST_RSA_KEY_SIZE, SameProcessStreamEndpointAssigner, ) @@ -251,7 +256,6 @@ def create_no_network_client(basedir): client = _NoNetworkClient( config, main_tub=None, - control_tub=None, i2p_provider=None, tor_provider=None, introducer_clients=[], @@ -274,8 +278,6 @@ class _NoNetworkClient(_Client): # type: ignore # tahoe-lafs/ticket/3573 pass def init_introducer_client(self): pass - def create_control_tub(self): - pass def create_log_tub(self): pass def setup_logging(self): @@ -284,8 +286,6 @@ class _NoNetworkClient(_Client): # type: ignore # tahoe-lafs/ticket/3573 service.MultiService.startService(self) def stopService(self): return service.MultiService.stopService(self) - def init_control(self): - pass def init_helper(self): pass def init_key_gen(self): @@ -392,7 +392,6 @@ class NoNetworkGrid(service.MultiService): if not c: c = yield create_no_network_client(clientdir) - c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE) c.nodeid = clientid c.short_nodeid = b32encode(clientid).lower()[:8] @@ -418,7 +417,7 @@ class NoNetworkGrid(service.MultiService): ss.setServiceParent(middleman) serverid = ss.my_nodeid self.servers_by_number[i] = ss - wrapper = wrap_storage_server(ss) + wrapper = wrap_storage_server(FoolscapStorageServer(ss)) self.wrappers_by_id[serverid] = wrapper self.proxies_by_id[serverid] = NoNetworkServer(serverid, wrapper) self.rebuild_serverlist() @@ -485,6 +484,18 @@ class GridTestMixin(object): def set_up_grid(self, num_clients=1, num_servers=10, client_config_hooks={}, oneshare=False): + """ + Create a Tahoe-LAFS storage grid. 
+ + :param num_clients: See ``NoNetworkGrid`` + :param num_servers: See `NoNetworkGrid`` + :param client_config_hooks: See ``NoNetworkGrid`` + + :param bool oneshare: If ``True`` then the first client node is + configured with ``n == k == happy == 1``. + + :return: ``None`` + """ # self.basedir must be set port_assigner = SameProcessStreamEndpointAssigner() port_assigner.setUp() @@ -563,6 +574,15 @@ class GridTestMixin(object): return sorted(shares) def copy_shares(self, uri): + # type: (bytes) -> Dict[bytes, bytes] + """ + Read all of the share files for the given capability from the storage area + of the storage servers created by ``set_up_grid``. + + :param bytes uri: A Tahoe-LAFS data capability. + + :return: A ``dict`` mapping share file names to share file contents. + """ shares = {} for (shnum, serverid, sharefile) in self.find_uri_shares(uri): with open(sharefile, "rb") as f: @@ -607,10 +627,15 @@ class GridTestMixin(object): f.write(corruptdata) def corrupt_all_shares(self, uri, corruptor, debug=False): + # type: (bytes, Callable[[bytes, bool], bytes], bool) -> None + """ + Apply ``corruptor`` to the contents of all share files associated with a + given capability and replace the share file contents with its result. + """ for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): with open(i_sharefile, "rb") as f: sharedata = f.read() - corruptdata = corruptor(sharedata, debug=debug) + corruptdata = corruptor(sharedata, debug) with open(i_sharefile, "wb") as f: f.write(corruptdata) diff --git a/src/allmydata/test/strategies.py b/src/allmydata/test/strategies.py index c0f558ef6..2bb23a373 100644 --- a/src/allmydata/test/strategies.py +++ b/src/allmydata/test/strategies.py @@ -16,6 +16,7 @@ from hypothesis.strategies import ( one_of, builds, binary, + integers, ) from ..uri import ( @@ -119,3 +120,17 @@ def dir2_mdmf_capabilities(): MDMFDirectoryURI, mdmf_capabilities(), ) + +def offsets(min_value=0, max_value=2 ** 16): + """ + Build ``int`` values that could be used as valid offsets into a sequence + (such as share data in a share file). + """ + return integers(min_value, max_value) + +def lengths(min_value=1, max_value=2 ** 16): + """ + Build ``int`` values that could be used as valid lengths of data (such as + share data in a share file). 
+ """ + return integers(min_value, max_value) diff --git a/src/allmydata/test/test_auth.py b/src/allmydata/test/test_auth.py index d5198d326..bfe717f79 100644 --- a/src/allmydata/test/test_auth.py +++ b/src/allmydata/test/test_auth.py @@ -8,7 +8,16 @@ from __future__ import unicode_literals from future.utils import PY2 if PY2: - from future.builtins import str # noqa: F401 + from future.builtins import str, open # noqa: F401 + +from hypothesis import ( + given, +) +from hypothesis.strategies import ( + text, + characters, + lists, +) from twisted.trial import unittest from twisted.python import filepath @@ -38,25 +47,184 @@ dBSD8940XU3YW+oeq8e+p3yQ2GinHfeJ3BYQyNQLuMAJ -----END RSA PRIVATE KEY----- """) -DUMMY_ACCOUNTS = u"""\ -alice herpassword URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111 -bob sekrit URI:DIR2:bbbbbbbbbbbbbbbbbbbbbbbbbb:2222222222222222222222222222222222222222222222222222 +DUMMY_KEY_DSA = keys.Key.fromString("""\ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABsQAAAAdzc2gtZH +NzAAAAgQDKMh/ELaiP21LYRBuPbUy7dUhv/XZwV7aS1LzxSP+KaJvtDOei8X76XEAfkqX+ +aGh9eup+BLkezrV6LlpO9uPzhY8ChlKpkvw5PZKv/2agSrVxZyG7yEzHNtSBQXE6qNMwIk +N/ycXLGCqyAhQSzRhLz9ETNaslRDLo7YyVWkiuAQAAABUA5nTatFKux5EqZS4EarMWFRBU +i1UAAACAFpkkK+JsPixSTPyn0DNMoGKA0Klqy8h61Ds6pws+4+aJQptUBshpwNw1ypo7MO ++goDZy3wwdWtURTPGMgesNdEfxp8L2/kqE4vpMK0myoczCqOiWMeNB/x1AStbSkBI8WmHW +2htgsC01xbaix/FrA3edK8WEyv+oIxlbV1FkrPkAAACANb0EpCc8uoR4/32rO2JLsbcLBw +H5wc2khe7AKkIa9kUknRIRvoCZUtXF5XuXXdRmnpVEm2KcsLdtZjip43asQcqgt0Kz3nuF +kAf7bI98G1waFUimcCSPsal4kCmW2HC11sg/BWOt5qczX/0/3xVxpo6juUeBq9ncnFTvPX +5fOlEAAAHoJkFqHiZBah4AAAAHc3NoLWRzcwAAAIEAyjIfxC2oj9tS2EQbj21Mu3VIb/12 +cFe2ktS88Uj/imib7QznovF++lxAH5Kl/mhofXrqfgS5Hs61ei5aTvbj84WPAoZSqZL8OT +2Sr/9moEq1cWchu8hMxzbUgUFxOqjTMCJDf8nFyxgqsgIUEs0YS8/REzWrJUQy6O2MlVpI +rgEAAAAVAOZ02rRSrseRKmUuBGqzFhUQVItVAAAAgBaZJCvibD4sUkz8p9AzTKBigNCpas +vIetQ7OqcLPuPmiUKbVAbIacDcNcqaOzDvoKA2ct8MHVrVEUzxjIHrDXRH8afC9v5KhOL6 +TCtJsqHMwqjoljHjQf8dQErW0pASPFph1tobYLAtNcW2osfxawN3nSvFhMr/qCMZW1dRZK +z5AAAAgDW9BKQnPLqEeP99qztiS7G3CwcB+cHNpIXuwCpCGvZFJJ0SEb6AmVLVxeV7l13U +Zp6VRJtinLC3bWY4qeN2rEHKoLdCs957hZAH+2yPfBtcGhVIpnAkj7GpeJAplthwtdbIPw +VjreanM1/9P98VcaaOo7lHgavZ3JxU7z1+XzpRAAAAFQC7360pZLbv7PFt4BPFJ8zAHxAe +QwAAAA5leGFya3VuQGJhcnlvbgECAwQ= +-----END OPENSSH PRIVATE KEY----- +""") -# dennis password URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111 +ACCOUNTS = u"""\ +# dennis {key} URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111 carol {key} URI:DIR2:cccccccccccccccccccccccccc:3333333333333333333333333333333333333333333333333333 """.format(key=str(DUMMY_KEY.public().toString("openssh"), "ascii")).encode("ascii") +# Python str.splitlines considers NEXT LINE, LINE SEPARATOR, and PARAGRAPH +# separator to be line separators, too. However, file.readlines() does not... +LINE_SEPARATORS = ( + '\x0a', # line feed + '\x0b', # vertical tab + '\x0c', # form feed + '\x0d', # carriage return +) + +class AccountFileParserTests(unittest.TestCase): + """ + Tests for ``load_account_file`` and its helper functions. + """ + @given(lists( + text(alphabet=characters( + blacklist_categories=( + # Surrogates are an encoding trick to help out UTF-16. + # They're not necessary to represent any non-surrogate code + # point in unicode. They're also not legal individually but + # only in pairs. + 'Cs', + ), + # Exclude all our line separators too. 
+ blacklist_characters=("\n", "\r"), + )), + )) + def test_ignore_comments(self, lines): + """ + ``auth.content_lines`` filters out lines beginning with `#` and empty + lines. + """ + expected = set() + + # It's not clear that real files and StringIO behave sufficiently + # similarly to use the latter instead of the former here. In + # particular, they seem to have distinct and incompatible + # line-splitting rules. + bufpath = self.mktemp() + with open(bufpath, "wt", encoding="utf-8") as buf: + for line in lines: + stripped = line.strip() + is_content = stripped and not stripped.startswith("#") + if is_content: + expected.add(stripped) + buf.write(line + "\n") + + with auth.open_account_file(bufpath) as buf: + actual = set(auth.content_lines(buf)) + + self.assertEqual(expected, actual) + + def test_parse_accounts(self): + """ + ``auth.parse_accounts`` accepts an iterator of account lines and returns + an iterator of structured account data. + """ + alice_key = DUMMY_KEY.public().toString("openssh").decode("utf-8") + alice_cap = "URI:DIR2:aaaa:1111" + + bob_key = DUMMY_KEY_DSA.public().toString("openssh").decode("utf-8") + bob_cap = "URI:DIR2:aaaa:2222" + self.assertEqual( + list(auth.parse_accounts([ + "alice {} {}".format(alice_key, alice_cap), + "bob {} {}".format(bob_key, bob_cap), + ])), + [ + ("alice", DUMMY_KEY.public(), alice_cap), + ("bob", DUMMY_KEY_DSA.public(), bob_cap), + ], + ) + + def test_parse_accounts_rejects_passwords(self): + """ + The iterator returned by ``auth.parse_accounts`` raises ``ValueError`` + when processing reaches a line that has what looks like a password + instead of an ssh key. + """ + with self.assertRaises(ValueError): + list(auth.parse_accounts(["alice apassword URI:DIR2:aaaa:1111"])) + + def test_create_account_maps(self): + """ + ``auth.create_account_maps`` accepts an iterator of structured account + data and returns two mappings: one from account name to rootcap, the + other from account name to public keys. + """ + alice_cap = "URI:DIR2:aaaa:1111" + alice_key = DUMMY_KEY.public() + bob_cap = "URI:DIR2:aaaa:2222" + bob_key = DUMMY_KEY_DSA.public() + accounts = [ + ("alice", alice_key, alice_cap), + ("bob", bob_key, bob_cap), + ] + self.assertEqual( + auth.create_account_maps(accounts), + ({ + b"alice": alice_cap.encode("utf-8"), + b"bob": bob_cap.encode("utf-8"), + }, + { + b"alice": [alice_key], + b"bob": [bob_key], + }), + ) + + def test_load_account_file(self): + """ + ``auth.load_account_file`` accepts an iterator of serialized account lines + and returns two mappings: one from account name to rootcap, the other + from account name to public keys. + """ + alice_key = DUMMY_KEY.public().toString("openssh").decode("utf-8") + alice_cap = "URI:DIR2:aaaa:1111" + + bob_key = DUMMY_KEY_DSA.public().toString("openssh").decode("utf-8") + bob_cap = "URI:DIR2:aaaa:2222" + + accounts = [ + "alice {} {}".format(alice_key, alice_cap), + "bob {} {}".format(bob_key, bob_cap), + "# carol {} {}".format(alice_key, alice_cap), + ] + + self.assertEqual( + auth.load_account_file(accounts), + ({ + b"alice": alice_cap.encode("utf-8"), + b"bob": bob_cap.encode("utf-8"), + }, + { + b"alice": [DUMMY_KEY.public()], + b"bob": [DUMMY_KEY_DSA.public()], + }), + ) + + class AccountFileCheckerKeyTests(unittest.TestCase): """ Tests for key handling done by allmydata.frontends.auth.AccountFileChecker. 
""" def setUp(self): self.account_file = filepath.FilePath(self.mktemp()) - self.account_file.setContent(DUMMY_ACCOUNTS) + self.account_file.setContent(ACCOUNTS) abspath = abspath_expanduser_unicode(str(self.account_file.path)) self.checker = auth.AccountFileChecker(None, abspath) - def test_unknown_user_ssh(self): + def test_unknown_user(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with UnauthorizedLogin if called with an SSHPrivateKey object with a @@ -67,67 +235,6 @@ class AccountFileCheckerKeyTests(unittest.TestCase): avatarId = self.checker.requestAvatarId(key_credentials) return self.assertFailure(avatarId, error.UnauthorizedLogin) - def test_unknown_user_password(self): - """ - AccountFileChecker.requestAvatarId returns a Deferred that fires with - UnauthorizedLogin if called with an SSHPrivateKey object with a - username not present in the account file. - - We use a commented out user, so we're also checking that comments are - skipped. - """ - key_credentials = credentials.UsernamePassword(b"dennis", b"password") - d = self.checker.requestAvatarId(key_credentials) - return self.assertFailure(d, error.UnauthorizedLogin) - - def test_password_auth_user_with_ssh_key(self): - """ - AccountFileChecker.requestAvatarId returns a Deferred that fires with - UnauthorizedLogin if called with an SSHPrivateKey object for a username - only associated with a password in the account file. - """ - key_credentials = credentials.SSHPrivateKey( - b"alice", b"md5", None, None, None) - avatarId = self.checker.requestAvatarId(key_credentials) - return self.assertFailure(avatarId, error.UnauthorizedLogin) - - def test_password_auth_user_with_correct_password(self): - """ - AccountFileChecker.requestAvatarId returns a Deferred that fires with - the user if the correct password is given. - """ - key_credentials = credentials.UsernamePassword(b"alice", b"herpassword") - d = self.checker.requestAvatarId(key_credentials) - def authenticated(avatarId): - self.assertEqual( - (b"alice", - b"URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111"), - (avatarId.username, avatarId.rootcap)) - return d - - def test_password_auth_user_with_correct_hashed_password(self): - """ - AccountFileChecker.requestAvatarId returns a Deferred that fires with - the user if the correct password is given in hashed form. - """ - key_credentials = credentials.UsernameHashedPassword(b"alice", b"herpassword") - d = self.checker.requestAvatarId(key_credentials) - def authenticated(avatarId): - self.assertEqual( - (b"alice", - b"URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111"), - (avatarId.username, avatarId.rootcap)) - return d - - def test_password_auth_user_with_wrong_password(self): - """ - AccountFileChecker.requestAvatarId returns a Deferred that fires with - UnauthorizedLogin if the wrong password is given. 
- """ - key_credentials = credentials.UsernamePassword(b"alice", b"WRONG") - avatarId = self.checker.requestAvatarId(key_credentials) - return self.assertFailure(avatarId, error.UnauthorizedLogin) - def test_unrecognized_key(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with diff --git a/src/allmydata/test/test_checker.py b/src/allmydata/test/test_checker.py index f56ecd089..3d64d4976 100644 --- a/src/allmydata/test/test_checker.py +++ b/src/allmydata/test/test_checker.py @@ -773,13 +773,13 @@ class AddLease(GridTestMixin, unittest.TestCase): d.addCallback(_check_cr, "mutable-normal") really_did_break = [] - # now break the server's remote_add_lease call + # now break the server's add_lease call def _break_add_lease(ign): def broken_add_lease(*args, **kwargs): really_did_break.append(1) raise KeyError("intentional failure, should be ignored") - assert self.g.servers_by_number[0].remote_add_lease - self.g.servers_by_number[0].remote_add_lease = broken_add_lease + assert self.g.servers_by_number[0].add_lease + self.g.servers_by_number[0].add_lease = broken_add_lease d.addCallback(_break_add_lease) # and confirm that the files still look healthy diff --git a/src/allmydata/test/test_client.py b/src/allmydata/test/test_client.py index fd2837f1d..c65a2fa2c 100644 --- a/src/allmydata/test/test_client.py +++ b/src/allmydata/test/test_client.py @@ -89,6 +89,7 @@ from .common import ( UseTestPlugins, MemoryIntroducerClient, get_published_announcements, + UseNode, ) from .matchers import ( MatchesSameElements, @@ -600,7 +601,7 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase): "enabled = true\n") c = yield client.create_client(basedir) ss = c.getServiceNamed("storage") - verdict = ss.remote_get_version() + verdict = ss.get_version() self.failUnlessReallyEqual(verdict[b"application-version"], allmydata.__full_version__.encode("ascii")) self.failIfEqual(str(allmydata.__version__), "unknown") @@ -953,13 +954,14 @@ class Run(unittest.TestCase, testutil.StallMixin): @defer.inlineCallbacks def test_reloadable(self): - basedir = FilePath("test_client.Run.test_reloadable") - private = basedir.child("private") - private.makedirs() + from twisted.internet import reactor + dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus" - write_introducer(basedir, "someintroducer", dummy) - basedir.child("tahoe.cfg").setContent(BASECONFIG. encode("ascii")) - c1 = yield client.create_client(basedir.path) + fixture = UseNode(None, None, FilePath(self.mktemp()), dummy, reactor=reactor) + fixture.setUp() + self.addCleanup(fixture.cleanUp) + + c1 = yield fixture.create_node() c1.setServiceParent(self.sparent) # delay to let the service start up completely. I'm not entirely sure @@ -981,7 +983,7 @@ class Run(unittest.TestCase, testutil.StallMixin): # also change _check_exit_trigger to use it instead of a raw # reactor.stop, also instrument the shutdown event in an # attribute that we can check.) 
- c2 = yield client.create_client(basedir.path) + c2 = yield fixture.create_node() c2.setServiceParent(self.sparent) yield c2.disownServiceParent() diff --git a/src/allmydata/test/test_common_util.py b/src/allmydata/test/test_common_util.py index 55986d123..c141adc8d 100644 --- a/src/allmydata/test/test_common_util.py +++ b/src/allmydata/test/test_common_util.py @@ -10,16 +10,30 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +import sys import random -import unittest +from hypothesis import given +from hypothesis.strategies import lists, sampled_from +from testtools.matchers import Equals +from twisted.python.reflect import ( + ModuleNotFound, + namedAny, +) + +from .common import ( + SyncTestCase, + disable_modules, +) from allmydata.test.common_util import flip_one_bit -class TestFlipOneBit(unittest.TestCase): +class TestFlipOneBit(SyncTestCase): def setUp(self): - random.seed(42) # I tried using version=1 on PY3 to avoid the if below, to no avail. + super(TestFlipOneBit, self).setUp() + # I tried using version=1 on PY3 to avoid the if below, to no avail. + random.seed(42) def test_accepts_byte_string(self): actual = flip_one_bit(b'foo') @@ -27,3 +41,61 @@ class TestFlipOneBit(unittest.TestCase): def test_rejects_unicode_string(self): self.assertRaises(AssertionError, flip_one_bit, u'foo') + + + +def some_existing_modules(): + """ + Build the names of modules (as native strings) that exist and can be + imported. + """ + candidates = sorted( + name + for name + in sys.modules + if "." not in name + and sys.modules[name] is not None + ) + return sampled_from(candidates) + +class DisableModulesTests(SyncTestCase): + """ + Tests for ``disable_modules``. + """ + def setup_example(self): + return sys.modules.copy() + + def teardown_example(self, safe_modules): + sys.modules.update(safe_modules) + + @given(lists(some_existing_modules(), unique=True)) + def test_importerror(self, module_names): + """ + While the ``disable_modules`` context manager is active any import of the + modules identified by the names passed to it result in ``ImportError`` + being raised. + """ + def get_modules(): + return list( + namedAny(name) + for name + in module_names + ) + before_modules = get_modules() + + with disable_modules(*module_names): + for name in module_names: + with self.assertRaises(ModuleNotFound): + namedAny(name) + + after_modules = get_modules() + self.assertThat(before_modules, Equals(after_modules)) + + def test_dotted_names_rejected(self): + """ + If names with "." in them are passed to ``disable_modules`` then + ``ValueError`` is raised. 
+ """ + with self.assertRaises(ValueError): + with disable_modules("foo.bar"): + pass diff --git a/src/allmydata/test/test_consumer.py b/src/allmydata/test/test_consumer.py index a689de462..234fc2594 100644 --- a/src/allmydata/test/test_consumer.py +++ b/src/allmydata/test/test_consumer.py @@ -14,11 +14,17 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from zope.interface import implementer -from twisted.trial.unittest import TestCase from twisted.internet.interfaces import IPushProducer, IPullProducer from allmydata.util.consumer import MemoryConsumer +from .common import ( + SyncTestCase, +) +from testtools.matchers import ( + Equals, +) + @implementer(IPushProducer) @implementer(IPullProducer) @@ -50,7 +56,7 @@ class Producer(object): self.consumer.unregisterProducer() -class MemoryConsumerTests(TestCase): +class MemoryConsumerTests(SyncTestCase): """Tests for MemoryConsumer.""" def test_push_producer(self): @@ -60,14 +66,14 @@ class MemoryConsumerTests(TestCase): consumer = MemoryConsumer() producer = Producer(consumer, [b"abc", b"def", b"ghi"]) consumer.registerProducer(producer, True) - self.assertEqual(consumer.chunks, [b"abc"]) + self.assertThat(consumer.chunks, Equals([b"abc"])) producer.iterate() producer.iterate() - self.assertEqual(consumer.chunks, [b"abc", b"def", b"ghi"]) - self.assertEqual(consumer.done, False) + self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) + self.assertFalse(consumer.done) producer.iterate() - self.assertEqual(consumer.chunks, [b"abc", b"def", b"ghi"]) - self.assertEqual(consumer.done, True) + self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) + self.assertTrue(consumer.done) def test_pull_producer(self): """ @@ -76,8 +82,8 @@ class MemoryConsumerTests(TestCase): consumer = MemoryConsumer() producer = Producer(consumer, [b"abc", b"def", b"ghi"]) consumer.registerProducer(producer, False) - self.assertEqual(consumer.chunks, [b"abc", b"def", b"ghi"]) - self.assertEqual(consumer.done, True) + self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) + self.assertTrue(consumer.done) # download_to_data() is effectively tested by some of the filenode tests, e.g. 
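The ``DisableModulesTests`` added in test_common_util.py above only exercise the observable behaviour of ``disable_modules``. For readers unfamiliar with the trick, here is a minimal sketch of one way such a context manager can be built, assuming the ``sys.modules``-patching approach the tests rely on; the name ``disable_modules_sketch`` and the details below are illustrative assumptions, not the actual helper from ``allmydata.test.common``:

    import sys
    from contextlib import contextmanager

    _MISSING = object()

    @contextmanager
    def disable_modules_sketch(*names):
        # Illustrative sketch only; the real helper may differ in detail.
        if any("." in name for name in names):
            raise ValueError("only top-level module names are supported")
        saved = {name: sys.modules.get(name, _MISSING) for name in names}
        try:
            for name in names:
                # A None entry in sys.modules makes "import <name>" raise
                # ImportError (ModuleNotFoundError on Python 3).
                sys.modules[name] = None
            yield
        finally:
            # Restore whatever was there before, removing entries we created.
            for name, original in saved.items():
                if original is _MISSING:
                    sys.modules.pop(name, None)
                else:
                    sys.modules[name] = original

That ``None`` entry is what lets ``test_importerror`` above see ``ModuleNotFound`` from ``namedAny`` while the context manager is active, and restoring the saved entries afterwards is what lets the same test assert that the modules import normally again once the block exits.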
diff --git a/src/allmydata/test/test_crawler.py b/src/allmydata/test/test_crawler.py index a9be90c43..80d732986 100644 --- a/src/allmydata/test/test_crawler.py +++ b/src/allmydata/test/test_crawler.py @@ -27,7 +27,7 @@ from allmydata.util import fileutil, hashutil, pollmixin from allmydata.storage.server import StorageServer, si_b2a from allmydata.storage.crawler import ShareCrawler, TimeSliceExceeded -from allmydata.test.common_util import StallMixin, FakeCanary +from allmydata.test.common_util import StallMixin class BucketEnumeratingCrawler(ShareCrawler): cpu_slice = 500 # make sure it can complete in a single slice @@ -124,12 +124,12 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin): def write(self, i, ss, serverid, tail=0): si = self.si(i) si = si[:-1] + bytes(bytearray((tail,))) - had,made = ss.remote_allocate_buckets(si, - self.rs(i, serverid), - self.cs(i, serverid), - set([0]), 99, FakeCanary()) - made[0].remote_write(0, b"data") - made[0].remote_close() + had,made = ss.allocate_buckets(si, + self.rs(i, serverid), + self.cs(i, serverid), + set([0]), 99) + made[0].write(0, b"data") + made[0].close() return si_b2a(si) def test_immediate(self): diff --git a/src/allmydata/test/test_crypto.py b/src/allmydata/test/test_crypto.py index 0aefa757f..052ddfbd7 100644 --- a/src/allmydata/test/test_crypto.py +++ b/src/allmydata/test/test_crypto.py @@ -60,6 +60,28 @@ class TestRegression(unittest.TestCase): # The public key corresponding to `RSA_2048_PRIV_KEY`. RSA_2048_PUB_KEY = b64decode(f.read().strip()) + with RESOURCE_DIR.child('pycryptopp-rsa-1024-priv.txt').open('r') as f: + # Created using `pycryptopp`: + # + # from base64 import b64encode + # from pycryptopp.publickey import rsa + # priv = rsa.generate(1024) + # priv_str = b64encode(priv.serialize()) + # pub_str = b64encode(priv.get_verifying_key().serialize()) + RSA_TINY_PRIV_KEY = b64decode(f.read().strip()) + assert isinstance(RSA_TINY_PRIV_KEY, native_bytes) + + with RESOURCE_DIR.child('pycryptopp-rsa-32768-priv.txt').open('r') as f: + # Created using `pycryptopp`: + # + # from base64 import b64encode + # from pycryptopp.publickey import rsa + # priv = rsa.generate(32768) + # priv_str = b64encode(priv.serialize()) + # pub_str = b64encode(priv.get_verifying_key().serialize()) + RSA_HUGE_PRIV_KEY = b64decode(f.read().strip()) + assert isinstance(RSA_HUGE_PRIV_KEY, native_bytes) + def test_old_start_up_test(self): """ This was the old startup test run at import time in `pycryptopp.cipher.aes`. 
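The fixtures added above load one deliberately undersized (1024-bit) and one deliberately oversized (32768-bit) pycryptopp-generated private key, and the hunk that follows asserts that ``rsa.create_signing_keypair_from_string`` rejects both with ``ValueError``. As a hedged illustration of the kind of bounds check those tests describe, assuming DER-serialized keys and the ``cryptography`` library (the helper and the ``MIN_BITS``/``MAX_BITS`` names are made up for this sketch and are not the actual ``allmydata.crypto.rsa`` code):

    from cryptography.hazmat.primitives.serialization import load_der_private_key

    # Assumed bounds, matching the docstrings of the tests below: keys of
    # fewer than 2048 bits, or of 32768 bits and more, are rejected.
    MIN_BITS = 2048
    MAX_BITS = 32768

    def load_bounded_rsa_private_key(der_bytes):
        # Illustrative sketch, not the project's actual key-loading code.
        priv = load_der_private_key(der_bytes, password=None)
        if priv.key_size < MIN_BITS or priv.key_size >= MAX_BITS:
            raise ValueError(
                "RSA key of %d bits is outside the supported range" % priv.key_size
            )
        return priv, priv.public_key()

With bounds like these, both new fixture keys fail the check while the existing 2048-bit regression key continues to load.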
@@ -232,6 +254,22 @@ class TestRegression(unittest.TestCase): priv_key, pub_key = rsa.create_signing_keypair_from_string(self.RSA_2048_PRIV_KEY) rsa.verify_signature(pub_key, self.RSA_2048_SIG, b'test') + def test_decode_tiny_rsa_keypair(self): + ''' + An unreasonably small RSA key is rejected ("unreasonably small" + means less that 2048 bits) + ''' + with self.assertRaises(ValueError): + rsa.create_signing_keypair_from_string(self.RSA_TINY_PRIV_KEY) + + def test_decode_huge_rsa_keypair(self): + ''' + An unreasonably _large_ RSA key is rejected ("unreasonably large" + means 32768 or more bits) + ''' + with self.assertRaises(ValueError): + rsa.create_signing_keypair_from_string(self.RSA_HUGE_PRIV_KEY) + def test_encrypt_data_not_bytes(self): ''' only bytes can be encrypted diff --git a/src/allmydata/test/test_deferredutil.py b/src/allmydata/test/test_deferredutil.py index 2a155089f..a37dfdd6f 100644 --- a/src/allmydata/test/test_deferredutil.py +++ b/src/allmydata/test/test_deferredutil.py @@ -129,3 +129,31 @@ class UntilTests(unittest.TestCase): self.assertEqual([1], counter) r1.callback(None) self.assertEqual([2], counter) + + +class AsyncToDeferred(unittest.TestCase): + """Tests for ``deferredutil.async_to_deferred.``""" + + def test_async_to_deferred_success(self): + """ + Normal results from a ``@async_to_deferred``-wrapped function get + turned into a ``Deferred`` with that value. + """ + @deferredutil.async_to_deferred + async def f(x, y): + return x + y + + result = f(1, y=2) + self.assertEqual(self.successResultOf(result), 3) + + def test_async_to_deferred_exception(self): + """ + Exceptions from a ``@async_to_deferred``-wrapped function get + turned into a ``Deferred`` with that value. + """ + @deferredutil.async_to_deferred + async def f(x, y): + return x/y + + result = f(1, 0) + self.assertIsInstance(self.failureResultOf(result).value, ZeroDivisionError) diff --git a/src/allmydata/test/test_download.py b/src/allmydata/test/test_download.py index d61942839..85d89cde6 100644 --- a/src/allmydata/test/test_download.py +++ b/src/allmydata/test/test_download.py @@ -14,6 +14,11 @@ if PY2: # a previous run. This asserts that the current code is capable of decoding # shares from a previous version. +try: + from typing import Any +except ImportError: + pass + import six import os from twisted.trial import unittest @@ -493,7 +498,7 @@ class DownloadTest(_Base, unittest.TestCase): d.addCallback(_done) return d - def test_simultaneous_onefails_onecancelled(self): + def test_simul_1fail_1cancel(self): # This exercises an mplayer behavior in ticket #1154. I believe that # mplayer made two simultaneous webapi GET requests: first one for an # index region at the end of the (mp3/video) file, then one for the @@ -951,12 +956,52 @@ class Corruption(_Base, unittest.TestCase): self.corrupt_shares_numbered(imm_uri, [2], _corruptor) def _corrupt_set(self, ign, imm_uri, which, newvalue): + # type: (Any, bytes, int, int) -> None + """ + Replace a single byte share file number 2 for the given capability with a + new byte. + + :param imm_uri: Corrupt share number 2 belonging to this capability. + :param which: The byte position to replace. + :param newvalue: The new byte value to set in the share. + """ log.msg("corrupt %d" % which) def _corruptor(s, debug=False): return s[:which] + bchr(newvalue) + s[which+1:] self.corrupt_shares_numbered(imm_uri, [2], _corruptor) def test_each_byte(self): + """ + Test share selection behavior of the downloader in the face of certain + kinds of data corruption. + + 1. 
upload a small share to the no-network grid + 2. read all of the resulting share files out of the no-network storage servers + 3. for each of + + a. each byte of the share file version field + b. each byte of the immutable share version field + c. each byte of the immutable share data offset field + d. the most significant byte of the block_shares offset field + e. one of the bytes of one of the merkle trees + f. one of the bytes of the share hashes list + + i. flip the least significant bit in all of the the share files + ii. perform the download/check/restore process + + 4. add 2 ** 24 to the share file version number + 5. perform the download/check/restore process + + 6. add 2 ** 24 to the share version number + 7. perform the download/check/restore process + + The download/check/restore process is: + + 1. attempt to download the data + 2. assert that the recovered plaintext is correct + 3. assert that only the "correct" share numbers were used to reconstruct the plaintext + 4. restore all of the share files to their pristine condition + """ # Setting catalog_detection=True performs an exhaustive test of the # Downloader's response to corruption in the lsb of each byte of the # 2070-byte share, with two goals: make sure we tolerate all forms of @@ -1068,9 +1113,17 @@ class Corruption(_Base, unittest.TestCase): d.addCallback(_download, imm_uri, i, expected) d.addCallback(lambda ign: self.restore_all_shares(self.shares)) d.addCallback(fireEventually) - corrupt_values = [(3, 2, "no-sh2"), - (15, 2, "need-4th"), # share looks v2 - ] + corrupt_values = [ + # Make the container version for share number 2 look + # unsupported. If you add support for immutable share file + # version number much past 16 million then you will have to + # update this test. Also maybe you have other problems. + (1, 255, "no-sh2"), + # Make the immutable share number 2 (not the container, the + # thing inside the container) look unsupported. Ditto the + # above about version numbers in the ballpark of 16 million. + (13, 255, "need-4th"), + ] for i,newvalue,expected in corrupt_values: d.addCallback(self._corrupt_set, imm_uri, i, newvalue) d.addCallback(_download, imm_uri, i, expected) @@ -1145,8 +1198,18 @@ class Corruption(_Base, unittest.TestCase): return d def _corrupt_flip_all(self, ign, imm_uri, which): + # type: (Any, bytes, int) -> None + """ + Flip the least significant bit at a given byte position in all share files + for the given capability. 
+ """ def _corruptor(s, debug=False): - return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:] + # type: (bytes, bool) -> bytes + before_corruption = s[:which] + after_corruption = s[which+1:] + original_byte = s[which:which+1] + corrupt_byte = bchr(ord(original_byte) ^ 0x01) + return b"".join([before_corruption, corrupt_byte, after_corruption]) self.corrupt_all_shares(imm_uri, _corruptor) class DownloadV2(_Base, unittest.TestCase): diff --git a/src/allmydata/test/test_eliotutil.py b/src/allmydata/test/test_eliotutil.py index 3f915ecd2..cabe599b3 100644 --- a/src/allmydata/test/test_eliotutil.py +++ b/src/allmydata/test/test_eliotutil.py @@ -27,13 +27,12 @@ from fixtures import ( ) from testtools import ( TestCase, -) -from testtools import ( TestResult, ) from testtools.matchers import ( Is, IsInstance, + Not, MatchesStructure, Equals, HasLength, @@ -65,11 +64,11 @@ from twisted.internet.task import deferLater from twisted.internet import reactor from ..util.eliotutil import ( + eliot_json_encoder, log_call_deferred, _parse_destination_description, _EliotLogging, ) -from ..util.jsonbytes import AnyBytesJSONEncoder from .common import ( SyncTestCase, @@ -77,24 +76,105 @@ from .common import ( ) -class EliotLoggedTestTests(AsyncTestCase): +def passes(): + """ + Create a matcher that matches a ``TestCase`` that runs without failures or + errors. + """ + def run(case): + result = TestResult() + case.run(result) + return result.wasSuccessful() + return AfterPreprocessing(run, Equals(True)) + + +class EliotLoggedTestTests(TestCase): + """ + Tests for the automatic log-related provided by ``AsyncTestCase``. + + This class uses ``testtools.TestCase`` because it is inconvenient to nest + ``AsyncTestCase`` inside ``AsyncTestCase`` (in particular, Eliot messages + emitted by the inner test case get observed by the outer test case and if + an inner case emits invalid messages they cause the outer test case to + fail). + """ + def test_fails(self): + """ + A test method of an ``AsyncTestCase`` subclass can fail. + """ + class UnderTest(AsyncTestCase): + def test_it(self): + self.fail("make sure it can fail") + + self.assertThat(UnderTest("test_it"), Not(passes())) + + def test_unserializable_fails(self): + """ + A test method of an ``AsyncTestCase`` subclass that logs an unserializable + value with Eliot fails. + """ + class world(object): + """ + an unserializable object + """ + + class UnderTest(AsyncTestCase): + def test_it(self): + Message.log(hello=world) + + self.assertThat(UnderTest("test_it"), Not(passes())) + + def test_logs_non_utf_8_byte(self): + """ + A test method of an ``AsyncTestCase`` subclass can log a message that + contains a non-UTF-8 byte string and return ``None`` and pass. + """ + class UnderTest(AsyncTestCase): + def test_it(self): + Message.log(hello=b"\xFF") + + self.assertThat(UnderTest("test_it"), passes()) + def test_returns_none(self): - Message.log(hello="world") + """ + A test method of an ``AsyncTestCase`` subclass can log a message and + return ``None`` and pass. + """ + class UnderTest(AsyncTestCase): + def test_it(self): + Message.log(hello="world") + + self.assertThat(UnderTest("test_it"), passes()) def test_returns_fired_deferred(self): - Message.log(hello="world") - return succeed(None) + """ + A test method of an ``AsyncTestCase`` subclass can log a message and + return an already-fired ``Deferred`` and pass. 
+ """ + class UnderTest(AsyncTestCase): + def test_it(self): + Message.log(hello="world") + return succeed(None) + + self.assertThat(UnderTest("test_it"), passes()) def test_returns_unfired_deferred(self): - Message.log(hello="world") - # @eliot_logged_test automatically gives us an action context but it's - # still our responsibility to maintain it across stack-busting - # operations. - d = DeferredContext(deferLater(reactor, 0.0, lambda: None)) - d.addCallback(lambda ignored: Message.log(goodbye="world")) - # We didn't start an action. We're not finishing an action. - return d.result + """ + A test method of an ``AsyncTestCase`` subclass can log a message and + return an unfired ``Deferred`` and pass when the ``Deferred`` fires. + """ + class UnderTest(AsyncTestCase): + def test_it(self): + Message.log(hello="world") + # @eliot_logged_test automatically gives us an action context + # but it's still our responsibility to maintain it across + # stack-busting operations. + d = DeferredContext(deferLater(reactor, 0.0, lambda: None)) + d.addCallback(lambda ignored: Message.log(goodbye="world")) + # We didn't start an action. We're not finishing an action. + return d.result + self.assertThat(UnderTest("test_it"), passes()) class ParseDestinationDescriptionTests(SyncTestCase): @@ -109,7 +189,7 @@ class ParseDestinationDescriptionTests(SyncTestCase): reactor = object() self.assertThat( _parse_destination_description("file:-")(reactor), - Equals(FileDestination(stdout, encoder=AnyBytesJSONEncoder)), + Equals(FileDestination(stdout, encoder=eliot_json_encoder)), ) diff --git a/src/allmydata/test/test_helper.py b/src/allmydata/test/test_helper.py index 3faffbe0d..933a2b591 100644 --- a/src/allmydata/test/test_helper.py +++ b/src/allmydata/test/test_helper.py @@ -39,6 +39,7 @@ from allmydata.crypto import aes from allmydata.storage.server import ( si_b2a, StorageServer, + FoolscapStorageServer, ) from allmydata.storage_client import StorageFarmBroker from allmydata.immutable.layout import ( @@ -427,7 +428,7 @@ class CHKCheckerAndUEBFetcherTests(SyncTestCase): """ storage_index = b"a" * 16 serverid = b"b" * 20 - storage = StorageServer(self.mktemp(), serverid) + storage = FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) rref_without_ueb = LocalWrapper(storage, fireNow) yield write_bad_share(rref_without_ueb, storage_index) server_without_ueb = NoNetworkServer(serverid, rref_without_ueb) @@ -451,7 +452,7 @@ class CHKCheckerAndUEBFetcherTests(SyncTestCase): """ storage_index = b"a" * 16 serverid = b"b" * 20 - storage = StorageServer(self.mktemp(), serverid) + storage = FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) rref_with_ueb = LocalWrapper(storage, fireNow) ueb = { "needed_shares": 2, @@ -487,7 +488,7 @@ class CHKCheckerAndUEBFetcherTests(SyncTestCase): in [b"b", b"c"] ) storages = list( - StorageServer(self.mktemp(), serverid) + FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) for serverid in serverids ) diff --git a/src/allmydata/test/test_hung_server.py b/src/allmydata/test/test_hung_server.py index 490315500..162b1d79c 100644 --- a/src/allmydata/test/test_hung_server.py +++ b/src/allmydata/test/test_hung_server.py @@ -73,7 +73,7 @@ class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, def _copy_share(self, share, to_server): (sharenum, sharefile) = share (id, ss) = to_server - shares_dir = os.path.join(ss.original.storedir, "shares") + shares_dir = os.path.join(ss.original._server.storedir, "shares") si = 
uri.from_string(self.uri).get_storage_index() si_dir = os.path.join(shares_dir, storage_index_to_dir(si)) if not os.path.exists(si_dir): @@ -82,7 +82,7 @@ class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, shutil.copy(sharefile, new_sharefile) self.shares = self.find_uri_shares(self.uri) # Make sure that the storage server has the share. - self.failUnless((sharenum, ss.original.my_nodeid, new_sharefile) + self.failUnless((sharenum, ss.original._server.my_nodeid, new_sharefile) in self.shares) def _corrupt_share(self, share, corruptor_func): diff --git a/src/allmydata/test/test_istorageserver.py b/src/allmydata/test/test_istorageserver.py index 40dcdc8bb..9e7e7b6e1 100644 --- a/src/allmydata/test/test_istorageserver.py +++ b/src/allmydata/test/test_istorageserver.py @@ -1,32 +1,30 @@ """ Tests for the ``IStorageServer`` interface. +Keep in mind that ``IStorageServer`` is actually the storage _client_ interface. + Note that for performance, in the future we might want the same node to be reused across tests, so each test should be careful to generate unique storage indexes. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals +from future.utils import bchr -from future.utils import PY2, bchr - -if PY2: - # fmt: off - from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 - # fmt: on +from typing import Set from random import Random +from unittest import SkipTest -from twisted.internet.defer import inlineCallbacks - +from twisted.internet.defer import inlineCallbacks, returnValue +from twisted.internet.task import Clock from foolscap.api import Referenceable, RemoteException +# A better name for this would be IStorageClient... from allmydata.interfaces import IStorageServer + from .common_system import SystemTestMixin from .common import AsyncTestCase +from allmydata.storage.server import StorageServer # not a IStorageServer!! # Use random generator with known seed, so results are reproducible if tests @@ -56,7 +54,7 @@ class IStorageServerSharedAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s shared APIs. - ``self.storage_server`` is expected to provide ``IStorageServer``. + ``self.storage_client`` is expected to provide ``IStorageServer``. """ @inlineCallbacks @@ -65,7 +63,7 @@ class IStorageServerSharedAPIsTestsMixin(object): ``IStorageServer`` returns a dictionary where the key is an expected protocol version. """ - result = yield self.storage_server.get_version() + result = yield self.storage_client.get_version() self.assertIsInstance(result, dict) self.assertIn(b"http://allmydata.org/tahoe/protocols/storage/v1", result) @@ -74,11 +72,16 @@ class IStorageServerImmutableAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s immutable APIs. - ``self.storage_server`` is expected to provide ``IStorageServer``. + ``self.storage_client`` is expected to provide ``IStorageServer``. ``self.disconnect()`` should disconnect and then reconnect, creating a new - ``self.storage_server``. Some implementations may wish to skip tests using + ``self.storage_client``. Some implementations may wish to skip tests using this; HTTP has no notion of disconnection. + + ``self.server`` is expected to be the corresponding + ``allmydata.storage.server.StorageServer`` instance. 
Time should be + instrumented, such that ``self.fake_time()`` and ``self.fake_sleep()`` + return and advance the server time, respectively. """ @inlineCallbacks @@ -87,7 +90,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): allocate_buckets() with a new storage index returns the matching shares. """ - (already_got, allocated) = yield self.storage_server.allocate_buckets( + (already_got, allocated) = yield self.storage_client.allocate_buckets( new_storage_index(), renew_secret=new_secret(), cancel_secret=new_secret(), @@ -110,7 +113,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (already_got, allocated) = yield self.storage_server.allocate_buckets( + (already_got, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -118,7 +121,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): allocated_size=1024, canary=Referenceable(), ) - (already_got2, allocated2) = yield self.storage_server.allocate_buckets( + (already_got2, allocated2) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -146,7 +149,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -155,14 +158,15 @@ class IStorageServerImmutableAPIsTestsMixin(object): canary=Referenceable(), ) - # Bucket 1 is fully written in one go. - yield allocated[0].callRemote("write", 0, b"1" * 1024) + # Bucket 1 get some data written (but not all, or HTTP implicitly + # finishes the upload) + yield allocated[0].callRemote("write", 0, b"1" * 1023) # Disconnect or abort, depending on the test: yield abort_or_disconnect(allocated[0]) # Write different data with no complaint: - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -172,20 +176,6 @@ class IStorageServerImmutableAPIsTestsMixin(object): ) yield allocated[0].callRemote("write", 0, b"2" * 1024) - def test_disconnection(self): - """ - If we disconnect in the middle of writing to a bucket, all data is - wiped, and it's even possible to write different data to the bucket. - - (In the real world one shouldn't do that, but writing different data is - a good way to test that the original data really was wiped.) - - HTTP protocol should skip this test, since disconnection is meaningless - concept; this is more about testing implicit contract the Foolscap - implementation depends on doesn't change as we refactor things. - """ - return self.abort_or_disconnect_half_way(lambda _: self.disconnect()) - @inlineCallbacks def test_written_shares_are_allocated(self): """ @@ -198,7 +188,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -219,7 +209,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): # Bucket 0 has partial write. 
yield allocated[0].callRemote("write", 0, b"1" * 512) - (already_got, _) = yield self.storage_server.allocate_buckets( + (already_got, _) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -242,7 +232,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -261,7 +251,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): yield allocated[2].callRemote("write", 0, b"3" * 512) yield allocated[2].callRemote("close") - buckets = yield self.storage_server.get_buckets(storage_index) + buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {1, 2}) self.assertEqual( @@ -282,7 +272,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -307,7 +297,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): new_secret(), new_secret(), ) - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, @@ -321,7 +311,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): yield allocated[0].callRemote("write", 5, b"1" * 20) yield allocated[0].callRemote("close") - buckets = yield self.storage_server.get_buckets(storage_index) + buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {0}) self.assertEqual((yield buckets[0].callRemote("read", 0, 25)), b"1" * 25) @@ -346,7 +336,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): ``IStorageServer.get_buckets()`` implementations. """ storage_index = new_storage_index() - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret=new_secret(), cancel_secret=new_secret(), @@ -362,7 +352,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): # Bucket 2 is partially written yield allocated[2].callRemote("write", 0, b"1" * 5) - buckets = yield self.storage_server.get_buckets(storage_index) + buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {1}) @inlineCallbacks @@ -375,7 +365,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): length = 256 * 17 storage_index = new_storage_index() - (_, allocated) = yield self.storage_server.allocate_buckets( + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret=new_secret(), cancel_secret=new_secret(), @@ -388,7 +378,7 @@ class IStorageServerImmutableAPIsTestsMixin(object): yield allocated[0].callRemote("write", 0, total_data) yield allocated[0].callRemote("close") - buckets = yield self.storage_server.get_buckets(storage_index) + buckets = yield self.storage_client.get_buckets(storage_index) bucket = buckets[0] for start, to_read in [ (0, 250), # fraction @@ -405,17 +395,15 @@ class IStorageServerImmutableAPIsTestsMixin(object): ) @inlineCallbacks - def test_bucket_advise_corrupt_share(self): - """ - Calling ``advise_corrupt_share()`` on a bucket returned by - ``IStorageServer.get_buckets()`` does not result in error (other - behavior is opaque at this level of abstraction). 
- """ + def create_share(self): + """Create a share, return the storage index.""" storage_index = new_storage_index() - (_, allocated) = yield self.storage_server.allocate_buckets( + renew_secret = new_secret() + cancel_secret = new_secret() + (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, - renew_secret=new_secret(), - cancel_secret=new_secret(), + renew_secret=renew_secret, + cancel_secret=cancel_secret, sharenums=set(range(1)), allocated_size=10, canary=Referenceable(), @@ -423,16 +411,118 @@ class IStorageServerImmutableAPIsTestsMixin(object): yield allocated[0].callRemote("write", 0, b"0123456789") yield allocated[0].callRemote("close") + returnValue((storage_index, renew_secret, cancel_secret)) - buckets = yield self.storage_server.get_buckets(storage_index) + @inlineCallbacks + def test_bucket_advise_corrupt_share(self): + """ + Calling ``advise_corrupt_share()`` on a bucket returned by + ``IStorageServer.get_buckets()`` does not result in error (other + behavior is opaque at this level of abstraction). + """ + storage_index, _, _ = yield self.create_share() + buckets = yield self.storage_client.get_buckets(storage_index) yield buckets[0].callRemote("advise_corrupt_share", b"OH NO") + @inlineCallbacks + def test_advise_corrupt_share(self): + """ + Calling ``advise_corrupt_share()`` on an immutable share does not + result in error (other behavior is opaque at this level of + abstraction). + """ + storage_index, _, _ = yield self.create_share() + yield self.storage_client.advise_corrupt_share( + b"immutable", storage_index, 0, b"ono" + ) + + @inlineCallbacks + def test_advise_corrupt_share_unknown_share_number(self): + """ + Calling ``advise_corrupt_share()`` on an immutable share, with an + unknown share number, does not result in error. + """ + storage_index, _, _ = yield self.create_share() + yield self.storage_client.advise_corrupt_share( + b"immutable", storage_index, 999, b"ono" + ) + + @inlineCallbacks + def test_allocate_buckets_creates_lease(self): + """ + When buckets are created using ``allocate_buckets()``, a lease is + created once writing is done. + """ + storage_index, _, _ = yield self.create_share() + [lease] = self.server.get_leases(storage_index) + # Lease expires in 31 days. + self.assertTrue( + lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10) + ) + + @inlineCallbacks + def test_add_lease_non_existent(self): + """ + If the storage index doesn't exist, adding the lease silently does nothing. + """ + storage_index = new_storage_index() + self.assertEqual(list(self.server.get_leases(storage_index)), []) + + renew_secret = new_secret() + cancel_secret = new_secret() + + # Add a lease: + yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) + self.assertEqual(list(self.server.get_leases(storage_index)), []) + + @inlineCallbacks + def test_add_lease_renewal(self): + """ + If the lease secret is reused, ``add_lease()`` extends the existing + lease. 
+ """ + storage_index, renew_secret, cancel_secret = yield self.create_share() + [lease] = self.server.get_leases(storage_index) + initial_expiration_time = lease.get_expiration_time() + + # Time passes: + self.fake_sleep(178) + + # We renew the lease: + yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) + [lease] = self.server.get_leases(storage_index) + new_expiration_time = lease.get_expiration_time() + self.assertEqual(new_expiration_time - initial_expiration_time, 178) + + @inlineCallbacks + def test_add_new_lease(self): + """ + If a new lease secret is used, ``add_lease()`` creates a new lease. + """ + storage_index, _, _ = yield self.create_share() + [lease] = self.server.get_leases(storage_index) + initial_expiration_time = lease.get_expiration_time() + + # Time passes: + self.fake_sleep(167) + + # We create a new lease: + renew_secret = new_secret() + cancel_secret = new_secret() + yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) + [lease1, lease2] = self.server.get_leases(storage_index) + self.assertEqual(lease1.get_expiration_time(), initial_expiration_time) + self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167) + class IStorageServerMutableAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s mutable APIs. - ``self.storage_server`` is expected to provide ``IStorageServer``. + ``self.storage_client`` is expected to provide ``IStorageServer``. + + ``self.server`` is expected to be the corresponding + ``allmydata.storage.server.StorageServer`` instance. ``STARAW`` is short for ``slot_testv_and_readv_and_writev``. """ @@ -443,7 +533,7 @@ class IStorageServerMutableAPIsTestsMixin(object): def staraw(self, *args, **kwargs): """Like ``slot_testv_and_readv_and_writev``, but less typing.""" - return self.storage_server.slot_testv_and_readv_and_writev(*args, **kwargs) + return self.storage_client.slot_testv_and_readv_and_writev(*args, **kwargs) @inlineCallbacks def test_STARAW_reads_after_write(self): @@ -739,7 +829,7 @@ class IStorageServerMutableAPIsTestsMixin(object): ) self.assertEqual(written, True) - reads = yield self.storage_server.slot_readv( + reads = yield self.storage_client.slot_readv( storage_index, shares=[0, 1], # Whole thing, partial, going beyond the edge, completely outside @@ -770,7 +860,7 @@ class IStorageServerMutableAPIsTestsMixin(object): ) self.assertEqual(written, True) - reads = yield self.storage_server.slot_readv( + reads = yield self.storage_client.slot_readv( storage_index, shares=[], readv=[(0, 7)], @@ -780,51 +870,290 @@ class IStorageServerMutableAPIsTestsMixin(object): {0: [b"abcdefg"], 1: [b"0123456"], 2: [b"9876543"]}, ) + @inlineCallbacks + def test_slot_readv_unknown_storage_index(self): + """ + With unknown storage index, ``IStorageServer.slot_readv()`` returns + empty dict. 
+ """ + storage_index = new_storage_index() + reads = yield self.storage_client.slot_readv( + storage_index, + shares=[], + readv=[(0, 7)], + ) + self.assertEqual( + reads, + {}, + ) -class _FoolscapMixin(SystemTestMixin): - """Run tests on Foolscap version of ``IStorageServer.""" + @inlineCallbacks + def create_slot(self): + """Create a slot with sharenum 0.""" + secrets = self.new_secrets() + storage_index = new_storage_index() + (written, _) = yield self.staraw( + storage_index, + secrets, + tw_vectors={ + 0: ([], [(0, b"abcdefg")], 7), + }, + r_vector=[], + ) + self.assertEqual(written, True) + returnValue((secrets, storage_index)) - def _get_native_server(self): - return next(iter(self.clients[0].storage_broker.get_known_servers())) + @inlineCallbacks + def test_advise_corrupt_share(self): + """ + Calling ``advise_corrupt_share()`` on a mutable share does not + result in error (other behavior is opaque at this level of + abstraction). + """ + secrets, storage_index = yield self.create_slot() + + yield self.storage_client.advise_corrupt_share( + b"mutable", storage_index, 0, b"ono" + ) + + @inlineCallbacks + def test_advise_corrupt_share_unknown_share_number(self): + """ + Calling ``advise_corrupt_share()`` on a mutable share with an unknown + share number does not result in error (other behavior is opaque at this + level of abstraction). + """ + secrets, storage_index = yield self.create_slot() + + yield self.storage_client.advise_corrupt_share( + b"mutable", storage_index, 999, b"ono" + ) + + @inlineCallbacks + def test_STARAW_create_lease(self): + """ + When STARAW creates a new slot, it also creates a lease. + """ + _, storage_index = yield self.create_slot() + [lease] = self.server.get_slot_leases(storage_index) + # Lease expires in 31 days. + self.assertTrue( + lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10) + ) + + @inlineCallbacks + def test_STARAW_renews_lease(self): + """ + When STARAW is run on an existing slot with same renewal secret, it + renews the lease. + """ + secrets, storage_index = yield self.create_slot() + [lease] = self.server.get_slot_leases(storage_index) + initial_expire = lease.get_expiration_time() + + # Time passes... + self.fake_sleep(17) + + # We do another write: + (written, _) = yield self.staraw( + storage_index, + secrets, + tw_vectors={ + 0: ([], [(0, b"1234567")], 7), + }, + r_vector=[], + ) + self.assertEqual(written, True) + + # The lease has been renewed: + [lease] = self.server.get_slot_leases(storage_index) + self.assertEqual(lease.get_expiration_time() - initial_expire, 17) + + @inlineCallbacks + def test_STARAW_new_lease(self): + """ + When STARAW is run with a new renewal secret on an existing slot, it + adds a new lease. + """ + secrets, storage_index = yield self.create_slot() + [lease] = self.server.get_slot_leases(storage_index) + initial_expire = lease.get_expiration_time() + + # Time passes... + self.fake_sleep(19) + + # We do another write: + (written, _) = yield self.staraw( + storage_index, + (secrets[0], new_secret(), new_secret()), + tw_vectors={ + 0: ([], [(0, b"1234567")], 7), + }, + r_vector=[], + ) + self.assertEqual(written, True) + + # A new lease was added: + [lease1, lease2] = self.server.get_slot_leases(storage_index) + self.assertEqual(lease1.get_expiration_time(), initial_expire) + self.assertEqual(lease2.get_expiration_time() - initial_expire, 19) + + @inlineCallbacks + def test_add_lease_renewal(self): + """ + If the lease secret is reused, ``add_lease()`` extends the existing + lease. 
+ """ + secrets, storage_index = yield self.create_slot() + [lease] = self.server.get_slot_leases(storage_index) + initial_expiration_time = lease.get_expiration_time() + + # Time passes: + self.fake_sleep(178) + + # We renew the lease: + yield self.storage_client.add_lease(storage_index, secrets[1], secrets[2]) + [lease] = self.server.get_slot_leases(storage_index) + new_expiration_time = lease.get_expiration_time() + self.assertEqual(new_expiration_time - initial_expiration_time, 178) + + @inlineCallbacks + def test_add_new_lease(self): + """ + If a new lease secret is used, ``add_lease()`` creates a new lease. + """ + secrets, storage_index = yield self.create_slot() + [lease] = self.server.get_slot_leases(storage_index) + initial_expiration_time = lease.get_expiration_time() + + # Time passes: + self.fake_sleep(167) + + # We create a new lease: + renew_secret = new_secret() + cancel_secret = new_secret() + yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) + [lease1, lease2] = self.server.get_slot_leases(storage_index) + self.assertEqual(lease1.get_expiration_time(), initial_expiration_time) + self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167) + + +class _SharedMixin(SystemTestMixin): + """Base class for Foolscap and HTTP mixins.""" + + SKIP_TESTS = set() # type: Set[str] + + def _get_istorage_server(self): + native_server = next(iter(self.clients[0].storage_broker.get_known_servers())) + client = native_server.get_storage_server() + self.assertTrue(IStorageServer.providedBy(client)) + return client @inlineCallbacks def setUp(self): + if self._testMethodName in self.SKIP_TESTS: + raise SkipTest( + "Test {} is still not supported".format(self._testMethodName) + ) + AsyncTestCase.setUp(self) + self.basedir = "test_istorageserver/" + self.id() yield SystemTestMixin.setUp(self) yield self.set_up_nodes(1) - self.storage_server = self._get_native_server().get_storage_server() - self.assertTrue(IStorageServer.providedBy(self.storage_server)) + self.server = None + for s in self.clients[0].services: + if isinstance(s, StorageServer): + self.server = s + break + assert self.server is not None, "Couldn't find StorageServer" + self._clock = Clock() + self._clock.advance(123456) + self.server._clock = self._clock + self.storage_client = self._get_istorage_server() + + def fake_time(self): + """Return the current fake, test-controlled, time.""" + return self._clock.seconds() + + def fake_sleep(self, seconds): + """Advance the fake time by the given number of seconds.""" + self._clock.advance(seconds) @inlineCallbacks def tearDown(self): AsyncTestCase.tearDown(self) yield SystemTestMixin.tearDown(self) + +class FoolscapSharedAPIsTests( + _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase +): + """Foolscap-specific tests for shared ``IStorageServer`` APIs.""" + + FORCE_FOOLSCAP_FOR_STORAGE = True + + +class HTTPSharedAPIsTests( + _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase +): + """HTTP-specific tests for shared ``IStorageServer`` APIs.""" + + FORCE_FOOLSCAP_FOR_STORAGE = False + + +class FoolscapImmutableAPIsTests( + _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase +): + """Foolscap-specific tests for immutable ``IStorageServer`` APIs.""" + + FORCE_FOOLSCAP_FOR_STORAGE = True + + def test_disconnection(self): + """ + If we disconnect in the middle of writing to a bucket, all data is + wiped, and it's even possible to write different data to the bucket. 
+ + (In the real world one shouldn't do that, but writing different data is + a good way to test that the original data really was wiped.) + + HTTP protocol doesn't need this test, since disconnection is a + meaningless concept; this is more about testing the implicit contract + the Foolscap implementation depends on doesn't change as we refactor + things. + """ + return self.abort_or_disconnect_half_way(lambda _: self.disconnect()) + @inlineCallbacks def disconnect(self): """ Disconnect and then reconnect with a new ``IStorageServer``. """ - current = self.storage_server + current = self.storage_client yield self.bounce_client(0) - self.storage_server = self._get_native_server().get_storage_server() - assert self.storage_server is not current + self.storage_client = self._get_istorage_server() + assert self.storage_client is not current -class FoolscapSharedAPIsTests( - _FoolscapMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase +class HTTPImmutableAPIsTests( + _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase ): - """Foolscap-specific tests for shared ``IStorageServer`` APIs.""" + """HTTP-specific tests for immutable ``IStorageServer`` APIs.""" - -class FoolscapImmutableAPIsTests( - _FoolscapMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase -): - """Foolscap-specific tests for immutable ``IStorageServer`` APIs.""" + FORCE_FOOLSCAP_FOR_STORAGE = False class FoolscapMutableAPIsTests( - _FoolscapMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase + _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase ): - """Foolscap-specific tests for immutable ``IStorageServer`` APIs.""" + """Foolscap-specific tests for mutable ``IStorageServer`` APIs.""" + + FORCE_FOOLSCAP_FOR_STORAGE = True + + +class HTTPMutableAPIsTests( + _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase +): + """HTTP-specific tests for mutable ``IStorageServer`` APIs.""" + + FORCE_FOOLSCAP_FOR_STORAGE = False diff --git a/src/allmydata/test/test_node.py b/src/allmydata/test/test_node.py index cf5fa27f3..c6cff1bab 100644 --- a/src/allmydata/test/test_node.py +++ b/src/allmydata/test/test_node.py @@ -69,6 +69,8 @@ import allmydata.test.common_util as testutil from .common import ( ConstantAddresses, + SameProcessStreamEndpointAssigner, + UseNode, ) def port_numbers(): @@ -80,11 +82,10 @@ class LoggingMultiService(service.MultiService): # see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2946 -def testing_tub(config_data=''): +def testing_tub(reactor, config_data=''): """ Creates a 'main' Tub for testing purposes, from config data """ - from twisted.internet import reactor basedir = 'dummy_basedir' config = config_from_string(basedir, 'DEFAULT_PORTNUMFILE_BLANK', config_data) fileutil.make_dirs(os.path.join(basedir, 'private')) @@ -112,6 +113,9 @@ class TestCase(testutil.SignalMixin, unittest.TestCase): # try to bind the port. We'll use a low-numbered one that's likely to # conflict with another service to prove it. self._available_port = 22 + self.port_assigner = SameProcessStreamEndpointAssigner() + self.port_assigner.setUp() + self.addCleanup(self.port_assigner.tearDown) def _test_location( self, @@ -137,11 +141,23 @@ class TestCase(testutil.SignalMixin, unittest.TestCase): :param local_addresses: If not ``None`` then a list of addresses to supply to the system under test as local addresses. 
""" + from twisted.internet import reactor + basedir = self.mktemp() create_node_dir(basedir, "testing") + if tub_port is None: + # Always configure a usable tub.port address instead of relying on + # the automatic port assignment. The automatic port assignment is + # prone to collisions and spurious test failures. + _, tub_port = self.port_assigner.assign(reactor) + config_data = "[node]\n" - if tub_port: - config_data += "tub.port = {}\n".format(tub_port) + config_data += "tub.port = {}\n".format(tub_port) + + # If they wanted a certain location, go for it. This probably won't + # agree with the tub.port value we set but that only matters if + # anything tries to use this to establish a connection ... which + # nothing in this test suite will. if tub_location is not None: config_data += "tub.location = {}\n".format(tub_location) @@ -149,7 +165,7 @@ class TestCase(testutil.SignalMixin, unittest.TestCase): self.patch(iputil, 'get_local_addresses_sync', lambda: local_addresses) - tub = testing_tub(config_data) + tub = testing_tub(reactor, config_data) class Foo(object): pass @@ -431,7 +447,12 @@ class TestCase(testutil.SignalMixin, unittest.TestCase): @defer.inlineCallbacks def test_logdir_is_str(self): - basedir = "test_node/test_logdir_is_str" + from twisted.internet import reactor + + basedir = FilePath(self.mktemp()) + fixture = UseNode(None, None, basedir, "pb://introducer/furl", {}, reactor=reactor) + fixture.setUp() + self.addCleanup(fixture.cleanUp) ns = Namespace() ns.called = False @@ -440,8 +461,7 @@ class TestCase(testutil.SignalMixin, unittest.TestCase): self.failUnless(isinstance(logdir, str), logdir) self.patch(foolscap.logging.log, 'setLogDir', call_setLogDir) - create_node_dir(basedir, "nothing to see here") - yield client.create_client(basedir) + yield fixture.create_node() self.failUnless(ns.called) def test_set_config_unescaped_furl_hash(self): diff --git a/src/allmydata/test/test_protocol_switch.py b/src/allmydata/test/test_protocol_switch.py new file mode 100644 index 000000000..4906896dc --- /dev/null +++ b/src/allmydata/test/test_protocol_switch.py @@ -0,0 +1,43 @@ +""" +Unit tests for ``allmydata.protocol_switch``. + +By its nature, most of the testing needs to be end-to-end; essentially any test +that uses real Foolscap (``test_system.py``, integration tests) ensures +Foolscap still works. ``test_istorageserver.py`` tests the HTTP support. +""" + +from foolscap.negotiate import Negotiation + +from .common import TestCase +from ..protocol_switch import _PretendToBeNegotiation + + +class UtilityTests(TestCase): + """Tests for utilities in the protocol switch code.""" + + def test_metaclass(self): + """ + A class that has the ``_PretendToBeNegotiation`` metaclass will support + ``isinstance()``'s normal semantics on its own instances, but will also + indicate that ``Negotiation`` instances are its instances. 
+ """ + + class Parent(metaclass=_PretendToBeNegotiation): + pass + + class Child(Parent): + pass + + class Other: + pass + + p = Parent() + self.assertIsInstance(p, Parent) + self.assertIsInstance(Negotiation(), Parent) + self.assertNotIsInstance(Other(), Parent) + + c = Child() + self.assertIsInstance(c, Child) + self.assertIsInstance(c, Parent) + self.assertIsInstance(Negotiation(), Child) + self.assertNotIsInstance(Other(), Child) diff --git a/src/allmydata/test/test_repairer.py b/src/allmydata/test/test_repairer.py index 88696000c..8545b1cf4 100644 --- a/src/allmydata/test/test_repairer.py +++ b/src/allmydata/test/test_repairer.py @@ -251,6 +251,12 @@ class Verifier(GridTestMixin, unittest.TestCase, RepairTestMixin): self.judge_invisible_corruption) def test_corrupt_ueb(self): + # Note that in some rare situations this might fail, specifically if + # the length of the UEB is corrupted to be a value that is bigger than + # the size but less than 2000, it might not get caught... But that's + # mostly because in that case it doesn't meaningfully corrupt it. See + # _get_uri_extension_the_old_way() in layout.py for where the 2000 + # number comes from. self.basedir = "repairer/Verifier/corrupt_ueb" return self._help_test_verify(common._corrupt_uri_extension, self.judge_invisible_corruption) @@ -717,7 +723,7 @@ class Repairer(GridTestMixin, unittest.TestCase, RepairTestMixin, ss = self.g.servers_by_number[0] # we want to delete the share corresponding to the server # we're making not-respond - share = next(ss._get_bucket_shares(self.c0_filenode.get_storage_index()))[0] + share = next(ss.get_shares(self.c0_filenode.get_storage_index()))[0] self.delete_shares_numbered(self.uri, [share]) return self.c0_filenode.check_and_repair(Monitor()) d.addCallback(_then) diff --git a/src/allmydata/test/test_runner.py b/src/allmydata/test/test_runner.py index 44c7e1bee..74e3f803e 100644 --- a/src/allmydata/test/test_runner.py +++ b/src/allmydata/test/test_runner.py @@ -42,16 +42,19 @@ from twisted.trial import unittest from twisted.internet import reactor from twisted.python import usage +from twisted.python.runtime import platform from twisted.internet.defer import ( inlineCallbacks, DeferredList, ) from twisted.python.filepath import FilePath -from twisted.python.runtime import ( - platform, -) from allmydata.util import fileutil, pollmixin from allmydata.util.encodingutil import unicode_to_argv +from allmydata.util.pid import ( + check_pid_process, + _pidfile_to_lockpath, + ProcessInTheWay, +) from allmydata.test import common_util import allmydata from allmydata.scripts.runner import ( @@ -203,10 +206,10 @@ class BinTahoe(common_util.SignalMixin, unittest.TestCase): # but on Windows we parse the whole command line string ourselves so # we have to have our own implementation of skipping these options. - # -t is a harmless option that warns about tabs so we can add it + # -B is a harmless option that prevents writing bytecode so we can add it # without impacting other behavior noticably. - out, err, returncode = run_bintahoe([u"--version"], python_options=[u"-t"]) - self.assertEqual(returncode, 0) + out, err, returncode = run_bintahoe([u"--version"], python_options=[u"-B"]) + self.assertEqual(returncode, 0, f"Out:\n{out}\nErr:\n{err}") self.assertTrue(out.startswith(allmydata.__appname__ + '/')) def test_help_eliot_destinations(self): @@ -418,9 +421,7 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): tahoe.active() - # We don't keep track of PIDs in files on Windows. 
- if not platform.isWindows(): - self.assertTrue(tahoe.twistd_pid_file.exists()) + self.assertTrue(tahoe.twistd_pid_file.exists()) self.assertTrue(tahoe.node_url_file.exists()) # rm this so we can detect when the second incarnation is ready @@ -493,9 +494,7 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): # change on restart storage_furl = fileutil.read(tahoe.storage_furl_file.path) - # We don't keep track of PIDs in files on Windows. - if not platform.isWindows(): - self.assertTrue(tahoe.twistd_pid_file.exists()) + self.assertTrue(tahoe.twistd_pid_file.exists()) # rm this so we can detect when the second incarnation is ready tahoe.node_url_file.remove() @@ -513,22 +512,23 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): fileutil.read(tahoe.storage_furl_file.path), ) - if not platform.isWindows(): - self.assertTrue( - tahoe.twistd_pid_file.exists(), - "PID file ({}) didn't exist when we expected it to. " - "These exist: {}".format( - tahoe.twistd_pid_file, - tahoe.twistd_pid_file.parent().listdir(), - ), - ) + self.assertTrue( + tahoe.twistd_pid_file.exists(), + "PID file ({}) didn't exist when we expected it to. " + "These exist: {}".format( + tahoe.twistd_pid_file, + tahoe.twistd_pid_file.parent().listdir(), + ), + ) yield tahoe.stop_and_wait() + # twistd.pid should be gone by now -- except on Windows, where + # killing a subprocess immediately exits with no chance for + # any shutdown code (that is, no Twisted shutdown hooks can + # run). if not platform.isWindows(): - # twistd.pid should be gone by now. self.assertFalse(tahoe.twistd_pid_file.exists()) - def _remove(self, res, file): fileutil.remove(file) return res @@ -610,8 +610,9 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): ), ) + # It should not be running (but windows shutdown can't run + # code so the PID file still exists there). if not platform.isWindows(): - # It should not be running. self.assertFalse(tahoe.twistd_pid_file.exists()) # Wait for the operation to *complete*. If we got this far it's @@ -621,3 +622,42 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): # What's left is a perfect indicator that the process has exited and # we won't get blamed for leaving the reactor dirty. yield client_running + + +class PidFileLocking(SyncTestCase): + """ + Direct tests for allmydata.util.pid functions + """ + + def test_locking(self): + """ + Fail to create a pidfile if another process has the lock already. + """ + # this can't just be "our" process because the locking library + # allows the same process to acquire a lock multiple times. + pidfile = FilePath(self.mktemp()) + lockfile = _pidfile_to_lockpath(pidfile) + + with open("other_lock.py", "w") as f: + f.write( + "\n".join([ + "import filelock, time, sys", + "with filelock.FileLock(sys.argv[1], timeout=1):", + " sys.stdout.write('.\\n')", + " sys.stdout.flush()", + " time.sleep(10)", + ]) + ) + proc = Popen( + [sys.executable, "other_lock.py", lockfile.path], + stdout=PIPE, + stderr=PIPE, + ) + # make sure our subprocess has had time to acquire the lock + # for sure (from the "." 
it prints) + proc.stdout.read(2) + + # acquiring the same lock should fail; it is locked by the subprocess + with self.assertRaises(ProcessInTheWay): + check_pid_process(pidfile) + proc.terminate() diff --git a/src/allmydata/test/test_stats.py b/src/allmydata/test/test_stats.py index e56f9d444..6fe690f1f 100644 --- a/src/allmydata/test/test_stats.py +++ b/src/allmydata/test/test_stats.py @@ -17,7 +17,7 @@ from allmydata.util import pollmixin import allmydata.test.common_util as testutil class FasterMonitor(CPUUsageMonitor): - POLL_INTERVAL = 0.1 + POLL_INTERVAL = 0.01 class CPUUsage(unittest.TestCase, pollmixin.PollMixin, testutil.StallMixin): @@ -36,9 +36,9 @@ class CPUUsage(unittest.TestCase, pollmixin.PollMixin, testutil.StallMixin): def _poller(): return bool(len(m.samples) == m.HISTORY_LENGTH+1) d = self.poll(_poller) - # pause one more second, to make sure that the history-trimming code - # is exercised - d.addCallback(self.stall, 1.0) + # pause a couple more intervals, to make sure that the history-trimming + # code is exercised + d.addCallback(self.stall, FasterMonitor.POLL_INTERVAL * 2) def _check(res): s = m.get_stats() self.failUnless("cpu_monitor.1min_avg" in s) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index d18960a1e..134609f81 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -13,28 +13,47 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from six import ensure_str +from io import ( + BytesIO, +) import time import os.path import platform import stat import struct import shutil +from functools import partial from uuid import uuid4 +from testtools.matchers import ( + HasLength, +) + from twisted.trial import unittest from twisted.internet import defer from twisted.internet.task import Clock -from hypothesis import given, strategies +from hypothesis import given, strategies, example import itertools from allmydata import interfaces from allmydata.util import fileutil, hashutil, base32 -from allmydata.storage.server import StorageServer, DEFAULT_RENEWAL_TIME +from allmydata.storage.server import ( + StorageServer, DEFAULT_RENEWAL_TIME, FoolscapStorageServer, +) from allmydata.storage.shares import get_share_file from allmydata.storage.mutable import MutableShareFile -from allmydata.storage.immutable import BucketWriter, BucketReader, ShareFile +from allmydata.storage.mutable_schema import ( + ALL_SCHEMAS as ALL_MUTABLE_SCHEMAS, +) +from allmydata.storage.immutable import ( + BucketWriter, BucketReader, ShareFile, FoolscapBucketWriter, + FoolscapBucketReader, +) +from allmydata.storage.immutable_schema import ( + ALL_SCHEMAS as ALL_IMMUTABLE_SCHEMAS, +) from allmydata.storage.common import storage_index_to_dir, \ UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError, \ si_b2a, si_a2b @@ -56,8 +75,21 @@ from allmydata.test.no_network import NoNetworkServer from allmydata.storage_client import ( _StorageServer, ) -from .common import LoggingServiceParent, ShouldFailMixin +from .common import ( + LoggingServiceParent, + ShouldFailMixin, + FakeDisk, + SyncTestCase, +) from .common_util import FakeCanary +from .common_storage import ( + upload_immutable, + upload_mutable, +) +from .strategies import ( + offsets, + lengths, +) class UtilTests(unittest.TestCase): @@ -102,6 +134,7 @@ class FakeStatsProvider(object): def register_producer(self, producer): 
pass + class Bucket(unittest.TestCase): def make_workdir(self, name): basedir = os.path.join("storage", "Bucket", name) @@ -128,26 +161,26 @@ class Bucket(unittest.TestCase): def test_create(self): incoming, final = self.make_workdir("test_create") - bw = BucketWriter(self, incoming, final, 200, self.make_lease()) - bw.remote_write(0, b"a"*25) - bw.remote_write(25, b"b"*25) - bw.remote_write(50, b"c"*25) - bw.remote_write(75, b"d"*7) - bw.remote_close() + bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) + bw.write(0, b"a"*25) + bw.write(25, b"b"*25) + bw.write(50, b"c"*25) + bw.write(75, b"d"*7) + bw.close() def test_readwrite(self): incoming, final = self.make_workdir("test_readwrite") - bw = BucketWriter(self, incoming, final, 200, self.make_lease()) - bw.remote_write(0, b"a"*25) - bw.remote_write(25, b"b"*25) - bw.remote_write(50, b"c"*7) # last block may be short - bw.remote_close() + bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) + bw.write(0, b"a"*25) + bw.write(25, b"b"*25) + bw.write(50, b"c"*7) # last block may be short + bw.close() # now read from it br = BucketReader(self, bw.finalhome) - self.failUnlessEqual(br.remote_read(0, 25), b"a"*25) - self.failUnlessEqual(br.remote_read(25, 25), b"b"*25) - self.failUnlessEqual(br.remote_read(50, 7), b"c"*7) + self.failUnlessEqual(br.read(0, 25), b"a"*25) + self.failUnlessEqual(br.read(25, 25), b"b"*25) + self.failUnlessEqual(br.read(50, 7), b"c"*7) def test_write_past_size_errors(self): """Writing beyond the size of the bucket throws an exception.""" @@ -155,9 +188,9 @@ class Bucket(unittest.TestCase): incoming, final = self.make_workdir( "test_write_past_size_errors-{}".format(i) ) - bw = BucketWriter(self, incoming, final, 200, self.make_lease()) + bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) with self.assertRaises(DataTooLargeError): - bw.remote_write(offset, b"a" * length) + bw.write(offset, b"a" * length) @given( maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98), @@ -174,29 +207,28 @@ class Bucket(unittest.TestCase): expected_data = b"".join(bchr(i) for i in range(100)) incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) bw = BucketWriter( - self, incoming, final, length, self.make_lease(), + self, incoming, final, length, self.make_lease(), Clock() ) # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. 
- bw.remote_write(10, expected_data[10:20]) - bw.remote_write(30, expected_data[30:40]) - bw.remote_write(50, expected_data[50:60]) + bw.write(10, expected_data[10:20]) + bw.write(30, expected_data[30:40]) + bw.write(50, expected_data[50:60]) # Then, an overlapping write but with matching data: - bw.remote_write( + bw.write( maybe_overlapping_offset, expected_data[ maybe_overlapping_offset:maybe_overlapping_offset + maybe_overlapping_length ] ) # Now fill in the holes: - bw.remote_write(0, expected_data[0:10]) - bw.remote_write(20, expected_data[20:30]) - bw.remote_write(40, expected_data[40:50]) - bw.remote_write(60, expected_data[60:]) - bw.remote_close() + bw.write(0, expected_data[0:10]) + bw.write(20, expected_data[20:30]) + bw.write(40, expected_data[40:50]) + bw.write(60, expected_data[60:]) + bw.close() br = BucketReader(self, bw.finalhome) - self.assertEqual(br.remote_read(0, length), expected_data) - + self.assertEqual(br.read(0, length), expected_data) @given( maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98), @@ -212,24 +244,56 @@ class Bucket(unittest.TestCase): length = 100 incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) bw = BucketWriter( - self, incoming, final, length, self.make_lease(), + self, incoming, final, length, self.make_lease(), Clock() ) # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. - bw.remote_write(10, b"1" * 10) - bw.remote_write(30, b"1" * 10) - bw.remote_write(50, b"1" * 10) + bw.write(10, b"1" * 10) + bw.write(30, b"1" * 10) + bw.write(50, b"1" * 10) # Then, write something that might overlap with some of them, but # conflicts. Then fill in holes left by first three writes. Conflict is # inevitable. with self.assertRaises(ConflictingWriteError): - bw.remote_write( + bw.write( maybe_overlapping_offset, b'X' * min(maybe_overlapping_length, length - maybe_overlapping_offset), ) - bw.remote_write(0, b"1" * 10) - bw.remote_write(20, b"1" * 10) - bw.remote_write(40, b"1" * 10) - bw.remote_write(60, b"1" * 40) + bw.write(0, b"1" * 10) + bw.write(20, b"1" * 10) + bw.write(40, b"1" * 10) + bw.write(60, b"1" * 40) + + @given( + offsets=strategies.lists( + strategies.integers(min_value=0, max_value=99), + min_size=20, + max_size=20 + ), + ) + @example(offsets=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 40, 70]) + def test_writes_return_when_finished( + self, offsets + ): + """ + The ``BucketWriter.write()`` return true if and only if the maximum + size has been reached via potentially overlapping writes. The + remaining ranges can be checked via ``BucketWriter.required_ranges()``. + """ + incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) + bw = BucketWriter( + self, incoming, final, 100, self.make_lease(), Clock() + ) + local_written = [0] * 100 + for offset in offsets: + length = min(30, 100 - offset) + data = b"1" * length + for i in range(offset, offset+length): + local_written[i] = 1 + finished = bw.write(offset, data) + self.assertEqual(finished, sum(local_written) == 100) + required_ranges = bw.required_ranges() + for i in range(0, 100): + self.assertEqual(local_written[i] == 1, required_ranges.get(i) is None) def test_read_past_end_of_share_data(self): # test vector for immutable files (hard-coded contents of an immutable share @@ -274,17 +338,78 @@ class Bucket(unittest.TestCase): # Now read from it. 
         br = BucketReader(mockstorageserver, final)

-        self.failUnlessEqual(br.remote_read(0, len(share_data)), share_data)
+        self.failUnlessEqual(br.read(0, len(share_data)), share_data)

         # Read past the end of share data to get the cancel secret.
         read_length = len(share_data) + len(ownernumber) + len(renewsecret) + len(cancelsecret)

-        result_of_read = br.remote_read(0, read_length)
+        result_of_read = br.read(0, read_length)
         self.failUnlessEqual(result_of_read, share_data)

-        result_of_read = br.remote_read(0, len(share_data)+1)
+        result_of_read = br.read(0, len(share_data)+1)
         self.failUnlessEqual(result_of_read, share_data)

+    def _assert_timeout_only_after_30_minutes(self, clock, bw):
+        """
+        The ``BucketWriter`` times out and is closed after 30 minutes, but not
+        sooner.
+        """
+        self.assertFalse(bw.closed)
+        # 29 minutes pass. Everything is fine.
+        for i in range(29):
+            clock.advance(60)
+            self.assertFalse(bw.closed, "Bucket closed after only %d minutes" % (i + 1,))
+        # After the 30th minute, the bucket is closed due to lack of writes.
+        clock.advance(60)
+        self.assertTrue(bw.closed)
+
+    def test_bucket_expires_if_no_writes_for_30_minutes(self):
+        """
+        If a ``BucketWriter`` receives no writes for 30 minutes, it is removed.
+        """
+        incoming, final = self.make_workdir("test_bucket_expires")
+        clock = Clock()
+        bw = BucketWriter(self, incoming, final, 200, self.make_lease(), clock)
+        self._assert_timeout_only_after_30_minutes(clock, bw)
+
+    def test_bucket_writes_delay_timeout(self):
+        """
+        So long as the ``BucketWriter`` receives writes, the removal timeout
+        is put off.
+        """
+        incoming, final = self.make_workdir("test_bucket_writes_delay_timeout")
+        clock = Clock()
+        bw = BucketWriter(self, incoming, final, 200, self.make_lease(), clock)
+        # 29 minutes pass, getting close to the timeout...
+        clock.advance(29 * 60)
+        # .. but we receive a write! So that should delay the timeout again to
+        # another 30 minutes.
+        bw.write(0, b"hello")
+        self._assert_timeout_only_after_30_minutes(clock, bw)
+
+    def test_bucket_closing_cancels_timeout(self):
+        """
+        Closing cancels the ``BucketWriter`` timeout.
+        """
+        incoming, final = self.make_workdir("test_bucket_close_timeout")
+        clock = Clock()
+        bw = BucketWriter(self, incoming, final, 10, self.make_lease(), clock)
+        self.assertTrue(clock.getDelayedCalls())
+        bw.close()
+        self.assertFalse(clock.getDelayedCalls())
+
+    def test_bucket_aborting_cancels_timeout(self):
+        """
+        Aborting cancels the ``BucketWriter`` timeout.
+ """ + incoming, final = self.make_workdir("test_bucket_abort_timeout") + clock = Clock() + bw = BucketWriter(self, incoming, final, 10, self.make_lease(), clock) + self.assertTrue(clock.getDelayedCalls()) + bw.abort() + self.assertFalse(clock.getDelayedCalls()) + + class RemoteBucket(object): def __init__(self, target): @@ -312,8 +437,8 @@ class BucketProxy(unittest.TestCase): final = os.path.join(basedir, "bucket") fileutil.make_dirs(basedir) fileutil.make_dirs(os.path.join(basedir, "tmp")) - bw = BucketWriter(self, incoming, final, size, self.make_lease()) - rb = RemoteBucket(bw) + bw = BucketWriter(self, incoming, final, size, self.make_lease(), Clock()) + rb = RemoteBucket(FoolscapBucketWriter(bw)) return bw, rb, final def make_lease(self): @@ -338,7 +463,7 @@ class BucketProxy(unittest.TestCase): block_size=10, num_segments=5, num_share_hashes=3, - uri_extension_size_max=500) + uri_extension_size=500) self.failUnless(interfaces.IStorageBucketWriter.providedBy(bp), bp) def _do_test_readwrite(self, name, header_size, wbp_class, rbp_class): @@ -369,7 +494,7 @@ class BucketProxy(unittest.TestCase): block_size=25, num_segments=4, num_share_hashes=3, - uri_extension_size_max=len(uri_extension)) + uri_extension_size=len(uri_extension)) d = bp.put_header() d.addCallback(lambda res: bp.put_block(0, b"a"*25)) @@ -385,7 +510,7 @@ class BucketProxy(unittest.TestCase): # now read everything back def _start_reading(res): br = BucketReader(self, sharefname) - rb = RemoteBucket(br) + rb = RemoteBucket(FoolscapBucketReader(br)) server = NoNetworkServer(b"abc", None) rbp = rbp_class(rb, server, storage_index=b"") self.failUnlessIn("to peer", repr(rbp)) @@ -438,11 +563,13 @@ class Server(unittest.TestCase): basedir = os.path.join("storage", "Server", name) return basedir - def create(self, name, reserved_space=0, klass=StorageServer, get_current_time=time.time): + def create(self, name, reserved_space=0, klass=StorageServer, clock=None): + if clock is None: + clock = Clock() workdir = self.workdir(name) ss = klass(workdir, b"\x00" * 20, reserved_space=reserved_space, stats_provider=FakeStatsProvider(), - get_current_time=get_current_time) + clock=clock) ss.setServiceParent(self.sparent) return ss @@ -451,31 +578,38 @@ class Server(unittest.TestCase): def test_declares_fixed_1528(self): ss = self.create("test_declares_fixed_1528") - ver = ss.remote_get_version() + ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.failUnless(sv1.get(b'prevents-read-past-end-of-share-data'), sv1) def test_declares_maximum_share_sizes(self): ss = self.create("test_declares_maximum_share_sizes") - ver = ss.remote_get_version() + ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.failUnlessIn(b'maximum-immutable-share-size', sv1) self.failUnlessIn(b'maximum-mutable-share-size', sv1) def test_declares_available_space(self): ss = self.create("test_declares_available_space") - ver = ss.remote_get_version() + ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.failUnlessIn(b'available-space', sv1) - def allocate(self, ss, storage_index, sharenums, size, canary=None): + def allocate(self, ss, storage_index, sharenums, size, renew_leases=True): + """ + Call directly into the storage server's allocate_buckets implementation, + skipping the Foolscap layer. 
+ """ renew_secret = hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)) cancel_secret = hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)) - if not canary: - canary = FakeCanary() - return ss.remote_allocate_buckets(storage_index, - renew_secret, cancel_secret, - sharenums, size, canary) + if isinstance(ss, FoolscapStorageServer): + ss = ss._server + return ss.allocate_buckets( + storage_index, + renew_secret, cancel_secret, + sharenums, size, + renew_leases=renew_leases, + ) def test_large_share(self): syslow = platform.system().lower() @@ -494,12 +628,12 @@ class Server(unittest.TestCase): shnum, bucket = list(writers.items())[0] # This test is going to hammer your filesystem if it doesn't make a sparse file for this. :-( - bucket.remote_write(2**32, b"ab") - bucket.remote_close() + bucket.write(2**32, b"ab") + bucket.close() - readers = ss.remote_get_buckets(b"allocate") + readers = ss.get_buckets(b"allocate") reader = readers[shnum] - self.failUnlessEqual(reader.remote_read(2**32, 2), b"ab") + self.failUnlessEqual(reader.read(2**32, 2), b"ab") def test_dont_overfill_dirs(self): """ @@ -510,8 +644,8 @@ class Server(unittest.TestCase): ss = self.create("test_dont_overfill_dirs") already, writers = self.allocate(ss, b"storageindex", [0], 10) for i, wb in writers.items(): - wb.remote_write(0, b"%10d" % i) - wb.remote_close() + wb.write(0, b"%10d" % i) + wb.close() storedir = os.path.join(self.workdir("test_dont_overfill_dirs"), "shares") children_of_storedir = set(os.listdir(storedir)) @@ -520,8 +654,8 @@ class Server(unittest.TestCase): # chars the same as the first storageindex. already, writers = self.allocate(ss, b"storageindey", [0], 10) for i, wb in writers.items(): - wb.remote_write(0, b"%10d" % i) - wb.remote_close() + wb.write(0, b"%10d" % i) + wb.close() storedir = os.path.join(self.workdir("test_dont_overfill_dirs"), "shares") new_children_of_storedir = set(os.listdir(storedir)) @@ -531,8 +665,8 @@ class Server(unittest.TestCase): ss = self.create("test_remove_incoming") already, writers = self.allocate(ss, b"vid", list(range(3)), 10) for i,wb in writers.items(): - wb.remote_write(0, b"%10d" % i) - wb.remote_close() + wb.write(0, b"%10d" % i) + wb.close() incoming_share_dir = wb.incominghome incoming_bucket_dir = os.path.dirname(incoming_share_dir) incoming_prefix_dir = os.path.dirname(incoming_bucket_dir) @@ -551,33 +685,45 @@ class Server(unittest.TestCase): # Now abort the writers. for writer in writers.values(): - writer.remote_abort() + writer.abort() self.failUnlessEqual(ss.allocated_size(), 0) + def test_immutable_length(self): + """ + ``get_immutable_share_length()`` returns the length of an immutable + share, as does ``BucketWriter.get_length()``.. 
+ """ + ss = self.create("test_immutable_length") + _, writers = self.allocate(ss, b"allocate", [22], 75) + bucket = writers[22] + bucket.write(0, b"X" * 75) + bucket.close() + self.assertEqual(ss.get_immutable_share_length(b"allocate", 22), 75) + self.assertEqual(ss.get_buckets(b"allocate")[22].get_length(), 75) def test_allocate(self): ss = self.create("test_allocate") - self.failUnlessEqual(ss.remote_get_buckets(b"allocate"), {}) + self.failUnlessEqual(ss.get_buckets(b"allocate"), {}) already,writers = self.allocate(ss, b"allocate", [0,1,2], 75) self.failUnlessEqual(already, set()) self.failUnlessEqual(set(writers.keys()), set([0,1,2])) # while the buckets are open, they should not count as readable - self.failUnlessEqual(ss.remote_get_buckets(b"allocate"), {}) + self.failUnlessEqual(ss.get_buckets(b"allocate"), {}) # close the buckets for i,wb in writers.items(): - wb.remote_write(0, b"%25d" % i) - wb.remote_close() + wb.write(0, b"%25d" % i) + wb.close() # aborting a bucket that was already closed is a no-op - wb.remote_abort() + wb.abort() # now they should be readable - b = ss.remote_get_buckets(b"allocate") + b = ss.get_buckets(b"allocate") self.failUnlessEqual(set(b.keys()), set([0,1,2])) - self.failUnlessEqual(b[0].remote_read(0, 25), b"%25d" % 0) + self.failUnlessEqual(b[0].read(0, 25), b"%25d" % 0) b_str = str(b[0]) self.failUnlessIn("BucketReader", b_str) self.failUnlessIn("mfwgy33dmf2g 0", b_str) @@ -598,21 +744,79 @@ class Server(unittest.TestCase): # aborting the writes should remove the tempfiles for i,wb in writers2.items(): - wb.remote_abort() + wb.abort() already2,writers2 = self.allocate(ss, b"allocate", [2,3,4,5], 75) self.failUnlessEqual(already2, set([0,1,2])) self.failUnlessEqual(set(writers2.keys()), set([5])) for i,wb in writers2.items(): - wb.remote_abort() + wb.abort() for i,wb in writers.items(): - wb.remote_abort() + wb.abort() + + def test_allocate_without_lease_renewal(self): + """ + ``StorageServer._allocate_buckets`` does not renew leases on existing + shares if ``renew_leases`` is ``False``. + """ + first_lease = 456 + second_lease = 543 + storage_index = b"allocate" + + clock = Clock() + clock.advance(first_lease) + ss = self.create( + "test_allocate_without_lease_renewal", + clock=clock, + ) + + # Put a share on there + already, writers = self.allocate( + ss, storage_index, [0], 1, renew_leases=False, + ) + (writer,) = writers.values() + writer.write(0, b"x") + writer.close() + + # It should have a lease granted at the current time. + shares = dict(ss.get_shares(storage_index)) + self.assertEqual( + [first_lease], + list( + lease.get_grant_renew_time_time() + for lease + in ShareFile(shares[0]).get_leases() + ), + ) + + # Let some time pass so we can tell if the lease on share 0 is + # renewed. + clock.advance(second_lease) + + # Put another share on there. + already, writers = self.allocate( + ss, storage_index, [1], 1, renew_leases=False, + ) + (writer,) = writers.values() + writer.write(0, b"x") + writer.close() + + # The first share's lease expiration time is unchanged. 
+ shares = dict(ss.get_shares(storage_index)) + self.assertEqual( + [first_lease], + list( + lease.get_grant_renew_time_time() + for lease + in ShareFile(shares[0]).get_leases() + ), + ) def test_bad_container_version(self): ss = self.create("test_bad_container_version") a,w = self.allocate(ss, b"si1", [0], 10) - w[0].remote_write(0, b"\xff"*10) - w[0].remote_close() + w[0].write(0, b"\xff"*10) + w[0].close() fn = os.path.join(ss.sharedir, storage_index_to_dir(b"si1"), "0") f = open(fn, "rb+") @@ -620,17 +824,28 @@ class Server(unittest.TestCase): f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1 f.close() - ss.remote_get_buckets(b"allocate") + ss.get_buckets(b"allocate") e = self.failUnlessRaises(UnknownImmutableContainerVersionError, - ss.remote_get_buckets, b"si1") - self.failUnlessIn(" had version 0 but we wanted 1", str(e)) + ss.get_buckets, b"si1") + self.assertEqual(e.filename, fn) + self.assertEqual(e.version, 0) + self.assertIn("had unexpected version 0", str(e)) def test_disconnect(self): # simulate a disconnection - ss = self.create("test_disconnect") + ss = FoolscapStorageServer(self.create("test_disconnect")) + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 canary = FakeCanary() - already,writers = self.allocate(ss, b"disconnect", [0,1,2], 75, canary) + already,writers = ss.remote_allocate_buckets( + b"disconnect", + renew_secret, + cancel_secret, + sharenums=[0,1,2], + allocated_size=75, + canary=canary, + ) self.failUnlessEqual(already, set()) self.failUnlessEqual(set(writers.keys()), set([0,1,2])) for (f,args,kwargs) in list(canary.disconnectors.values()): @@ -643,6 +858,72 @@ class Server(unittest.TestCase): self.failUnlessEqual(already, set()) self.failUnlessEqual(set(writers.keys()), set([0,1,2])) + def test_reserved_space_immutable_lease(self): + """ + If there is not enough available space to store an additional lease on an + immutable share then ``remote_add_lease`` fails with ``NoSpace`` when + an attempt is made to use it to create a new lease. + """ + disk = FakeDisk(total=1024, used=0) + self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) + + ss = self.create("test_reserved_space_immutable_lease") + + storage_index = b"x" * 16 + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + shares = {0: b"y" * 500} + upload_immutable(ss, storage_index, renew_secret, cancel_secret, shares) + + # use up all the available space + disk.use(disk.available) + + # Different secrets to produce a different lease, not a renewal. + renew_secret = b"R" * 32 + cancel_secret = b"C" * 32 + with self.assertRaises(interfaces.NoSpace): + ss.add_lease(storage_index, renew_secret, cancel_secret) + + def test_reserved_space_mutable_lease(self): + """ + If there is not enough available space to store an additional lease on a + mutable share then ``remote_add_lease`` fails with ``NoSpace`` when an + attempt is made to use it to create a new lease. + """ + disk = FakeDisk(total=1024, used=0) + self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) + + ss = self.create("test_reserved_space_mutable_lease") + + renew_secrets = iter( + "{}{}".format("r" * 31, i).encode("ascii") + for i + in range(5) + ) + + storage_index = b"x" * 16 + write_enabler = b"w" * 32 + cancel_secret = b"c" * 32 + secrets = (write_enabler, next(renew_secrets), cancel_secret) + shares = {0: b"y" * 500} + upload_mutable(ss, storage_index, secrets, shares) + + # use up all the available space + disk.use(disk.available) + + # The upload created one lease. 
There is room for three more leases + # in the share header. Even if we're out of disk space, on a boring + # enough filesystem we can write these. + for i in range(3): + ss.add_lease(storage_index, next(renew_secrets), cancel_secret) + + # Having used all of the space for leases in the header, we would have + # to allocate storage for the next lease. Since there is no space + # available, this must fail instead. + with self.assertRaises(interfaces.NoSpace): + ss.add_lease(storage_index, next(renew_secrets), cancel_secret) + + def test_reserved_space(self): reserved = 10000 allocated = 0 @@ -655,40 +936,49 @@ class Server(unittest.TestCase): } self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) - ss = self.create("test_reserved_space", reserved_space=reserved) + ss = FoolscapStorageServer(self.create("test_reserved_space", reserved_space=reserved)) # 15k available, 10k reserved, leaves 5k for shares # a newly created and filled share incurs this much overhead, beyond # the size we request. OVERHEAD = 3*4 LEASE_SIZE = 4+32+32+4 + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 canary = FakeCanary() - already, writers = self.allocate(ss, b"vid1", [0,1,2], 1000, canary) + already, writers = ss.remote_allocate_buckets( + b"vid1", + renew_secret, + cancel_secret, + sharenums=[0,1,2], + allocated_size=1000, + canary=canary, + ) self.failUnlessEqual(len(writers), 3) # now the StorageServer should have 3000 bytes provisionally # allocated, allowing only 2000 more to be claimed - self.failUnlessEqual(len(ss._bucket_writers), 3) + self.failUnlessEqual(len(ss._server._bucket_writers), 3) # allocating 1001-byte shares only leaves room for one canary2 = FakeCanary() already2, writers2 = self.allocate(ss, b"vid2", [0,1,2], 1001, canary2) self.failUnlessEqual(len(writers2), 1) - self.failUnlessEqual(len(ss._bucket_writers), 4) + self.failUnlessEqual(len(ss._server._bucket_writers), 4) # we abandon the first set, so their provisional allocation should be # returned canary.disconnected() - self.failUnlessEqual(len(ss._bucket_writers), 1) + self.failUnlessEqual(len(ss._server._bucket_writers), 1) # now we have a provisional allocation of 1001 bytes # and we close the second set, so their provisional allocation should # become real, long-term allocation, and grows to include the # overhead. 
for bw in writers2.values(): - bw.remote_write(0, b"a"*25) - bw.remote_close() - self.failUnlessEqual(len(ss._bucket_writers), 0) + bw.write(0, b"a"*25) + bw.close() + self.failUnlessEqual(len(ss._server._bucket_writers), 0) # this also changes the amount reported as available by call_get_disk_stats allocated = 1001 + OVERHEAD + LEASE_SIZE @@ -696,14 +986,21 @@ class Server(unittest.TestCase): # now there should be ALLOCATED=1001+12+72=1085 bytes allocated, and # 5000-1085=3915 free, therefore we can fit 39 100byte shares canary3 = FakeCanary() - already3, writers3 = self.allocate(ss, b"vid3", list(range(100)), 100, canary3) + already3, writers3 = ss.remote_allocate_buckets( + b"vid3", + renew_secret, + cancel_secret, + sharenums=list(range(100)), + allocated_size=100, + canary=canary3, + ) self.failUnlessEqual(len(writers3), 39) - self.failUnlessEqual(len(ss._bucket_writers), 39) + self.failUnlessEqual(len(ss._server._bucket_writers), 39) canary3.disconnected() - self.failUnlessEqual(len(ss._bucket_writers), 0) - ss.disownServiceParent() + self.failUnlessEqual(len(ss._server._bucket_writers), 0) + ss._server.disownServiceParent() del ss def test_seek(self): @@ -732,91 +1029,92 @@ class Server(unittest.TestCase): Given a StorageServer, create a bucket with 5 shares and return renewal and cancellation secrets. """ - canary = FakeCanary() sharenums = list(range(5)) size = 100 # Creating a bucket also creates a lease: rs, cs = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) - already, writers = ss.remote_allocate_buckets(storage_index, rs, cs, - sharenums, size, canary) + already, writers = ss.allocate_buckets(storage_index, rs, cs, + sharenums, size) self.failUnlessEqual(len(already), expected_already) self.failUnlessEqual(len(writers), expected_writers) for wb in writers.values(): - wb.remote_close() + wb.close() return rs, cs def test_leases(self): ss = self.create("test_leases") - canary = FakeCanary() sharenums = list(range(5)) size = 100 # Create a bucket: rs0, cs0 = self.create_bucket_5_shares(ss, b"si0") - leases = list(ss.get_leases(b"si0")) - self.failUnlessEqual(len(leases), 1) - self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs0])) + + # Upload of an immutable implies creation of a single lease with the + # supplied secrets. 
+ (lease,) = ss.get_leases(b"si0") + self.assertTrue(lease.is_renew_secret(rs0)) rs1, cs1 = self.create_bucket_5_shares(ss, b"si1") # take out a second lease on si1 rs2, cs2 = self.create_bucket_5_shares(ss, b"si1", 5, 0) - leases = list(ss.get_leases(b"si1")) - self.failUnlessEqual(len(leases), 2) - self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2])) + (lease1, lease2) = ss.get_leases(b"si1") + self.assertTrue(lease1.is_renew_secret(rs1)) + self.assertTrue(lease2.is_renew_secret(rs2)) # and a third lease, using add-lease rs2a,cs2a = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) - ss.remote_add_lease(b"si1", rs2a, cs2a) - leases = list(ss.get_leases(b"si1")) - self.failUnlessEqual(len(leases), 3) - self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2, rs2a])) + ss.add_lease(b"si1", rs2a, cs2a) + (lease1, lease2, lease3) = ss.get_leases(b"si1") + self.assertTrue(lease1.is_renew_secret(rs1)) + self.assertTrue(lease2.is_renew_secret(rs2)) + self.assertTrue(lease3.is_renew_secret(rs2a)) # add-lease on a missing storage index is silently ignored - self.failUnlessEqual(ss.remote_add_lease(b"si18", b"", b""), None) + self.assertIsNone(ss.add_lease(b"si18", b"", b"")) # check that si0 is readable - readers = ss.remote_get_buckets(b"si0") + readers = ss.get_buckets(b"si0") self.failUnlessEqual(len(readers), 5) # renew the first lease. Only the proper renew_secret should work - ss.remote_renew_lease(b"si0", rs0) - self.failUnlessRaises(IndexError, ss.remote_renew_lease, b"si0", cs0) - self.failUnlessRaises(IndexError, ss.remote_renew_lease, b"si0", rs1) + ss.renew_lease(b"si0", rs0) + self.failUnlessRaises(IndexError, ss.renew_lease, b"si0", cs0) + self.failUnlessRaises(IndexError, ss.renew_lease, b"si0", rs1) # check that si0 is still readable - readers = ss.remote_get_buckets(b"si0") + readers = ss.get_buckets(b"si0") self.failUnlessEqual(len(readers), 5) # There is no such method as remote_cancel_lease for now -- see # ticket #1528. 
- self.failIf(hasattr(ss, 'remote_cancel_lease'), \ - "ss should not have a 'remote_cancel_lease' method/attribute") + self.failIf(hasattr(FoolscapStorageServer(ss), 'remote_cancel_lease'), \ + "ss should not have a 'remote_cancel_lease' method/attribute") # test overlapping uploads rs3,cs3 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) rs4,cs4 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) - already,writers = ss.remote_allocate_buckets(b"si3", rs3, cs3, - sharenums, size, canary) + already,writers = ss.allocate_buckets(b"si3", rs3, cs3, + sharenums, size) self.failUnlessEqual(len(already), 0) self.failUnlessEqual(len(writers), 5) - already2,writers2 = ss.remote_allocate_buckets(b"si3", rs4, cs4, - sharenums, size, canary) + already2,writers2 = ss.allocate_buckets(b"si3", rs4, cs4, + sharenums, size) self.failUnlessEqual(len(already2), 0) self.failUnlessEqual(len(writers2), 0) for wb in writers.values(): - wb.remote_close() + wb.close() leases = list(ss.get_leases(b"si3")) self.failUnlessEqual(len(leases), 1) - already3,writers3 = ss.remote_allocate_buckets(b"si3", rs4, cs4, - sharenums, size, canary) + already3,writers3 = ss.allocate_buckets(b"si3", rs4, cs4, + sharenums, size) self.failUnlessEqual(len(already3), 5) self.failUnlessEqual(len(writers3), 0) @@ -830,20 +1128,20 @@ class Server(unittest.TestCase): """ clock = Clock() clock.advance(123) - ss = self.create("test_immutable_add_lease_renews", get_current_time=clock.seconds) + ss = self.create("test_immutable_add_lease_renews", clock=clock) # Start out with single lease created with bucket: renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0") [lease] = ss.get_leases(b"si0") - self.assertEqual(lease.expiration_time, 123 + DEFAULT_RENEWAL_TIME) + self.assertEqual(lease.get_expiration_time(), 123 + DEFAULT_RENEWAL_TIME) # Time passes: clock.advance(123456) # Adding a lease with matching renewal secret just renews it: - ss.remote_add_lease(b"si0", renewal_secret, cancel_secret) + ss.add_lease(b"si0", renewal_secret, cancel_secret) [lease] = ss.get_leases(b"si0") - self.assertEqual(lease.expiration_time, 123 + 123456 + DEFAULT_RENEWAL_TIME) + self.assertEqual(lease.get_expiration_time(), 123 + 123456 + DEFAULT_RENEWAL_TIME) def test_have_shares(self): """By default the StorageServer has no shares.""" @@ -877,14 +1175,35 @@ class Server(unittest.TestCase): self.failUnlessEqual(already, set()) self.failUnlessEqual(set(writers.keys()), set([0,1,2])) for i,wb in writers.items(): - wb.remote_write(0, b"%25d" % i) - wb.remote_close() + wb.write(0, b"%25d" % i) + wb.close() # since we discard the data, the shares should be present but sparse. # Since we write with some seeks, the data we read back will be all # zeros. - b = ss.remote_get_buckets(b"vid") + b = ss.get_buckets(b"vid") self.failUnlessEqual(set(b.keys()), set([0,1,2])) - self.failUnlessEqual(b[0].remote_read(0, 25), b"\x00" * 25) + self.failUnlessEqual(b[0].read(0, 25), b"\x00" * 25) + + def test_reserved_space_advise_corruption(self): + """ + If there is no available space then ``remote_advise_corrupt_share`` does + not write a corruption report. 
+ """ + disk = FakeDisk(total=1024, used=1024) + self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) + + workdir = self.workdir("test_reserved_space_advise_corruption") + ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) + ss.setServiceParent(self.sparent) + + upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) + ss.advise_corrupt_share(b"immutable", b"si0", 0, + b"This share smells funny.\n") + + self.assertEqual( + [], + os.listdir(ss.corruption_advisory_dir), + ) def test_advise_corruption(self): workdir = self.workdir("test_advise_corruption") @@ -892,8 +1211,9 @@ class Server(unittest.TestCase): ss.setServiceParent(self.sparent) si0_s = base32.b2a(b"si0") - ss.remote_advise_corrupt_share(b"immutable", b"si0", 0, - b"This share smells funny.\n") + upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) + ss.advise_corrupt_share(b"immutable", b"si0", 0, + b"This share smells funny.\n") reportdir = os.path.join(workdir, "corruption-advisories") reports = os.listdir(reportdir) self.failUnlessEqual(len(reports), 1) @@ -912,12 +1232,12 @@ class Server(unittest.TestCase): already,writers = self.allocate(ss, b"si1", [1], 75) self.failUnlessEqual(already, set()) self.failUnlessEqual(set(writers.keys()), set([1])) - writers[1].remote_write(0, b"data") - writers[1].remote_close() + writers[1].write(0, b"data") + writers[1].close() - b = ss.remote_get_buckets(b"si1") + b = ss.get_buckets(b"si1") self.failUnlessEqual(set(b.keys()), set([1])) - b[1].remote_advise_corrupt_share(b"This share tastes like dust.\n") + b[1].advise_corrupt_share(b"This share tastes like dust.\n") reports = os.listdir(reportdir) self.failUnlessEqual(len(reports), 2) @@ -930,6 +1250,26 @@ class Server(unittest.TestCase): self.failUnlessIn(b"share_number: 1", report) self.failUnlessIn(b"This share tastes like dust.", report) + def test_advise_corruption_missing(self): + """ + If a corruption advisory is received for a share that is not present on + this server then it is not persisted. 
+ """ + workdir = self.workdir("test_advise_corruption_missing") + ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) + ss.setServiceParent(self.sparent) + + # Upload one share for this storage index + upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) + + # And try to submit a corruption advisory about a different share + ss.advise_corrupt_share(b"immutable", b"si0", 1, + b"This share smells funny.\n") + + self.assertEqual( + [], + os.listdir(ss.corruption_advisory_dir), + ) class MutableServer(unittest.TestCase): @@ -944,10 +1284,12 @@ class MutableServer(unittest.TestCase): basedir = os.path.join("storage", "MutableServer", name) return basedir - def create(self, name, get_current_time=time.time): + def create(self, name, clock=None): workdir = self.workdir(name) + if clock is None: + clock = Clock() ss = StorageServer(workdir, b"\x00" * 20, - get_current_time=get_current_time) + clock=clock) ss.setServiceParent(self.sparent) return ss @@ -973,7 +1315,7 @@ class MutableServer(unittest.TestCase): write_enabler = self.write_enabler(we_tag) renew_secret = self.renew_secret(lease_tag) cancel_secret = self.cancel_secret(lease_tag) - rstaraw = ss.remote_slot_testv_and_readv_and_writev + rstaraw = ss.slot_testv_and_readv_and_writev testandwritev = dict( [ (shnum, ([], [], None) ) for shnum in sharenums ] ) readv = [] @@ -986,6 +1328,64 @@ class MutableServer(unittest.TestCase): self.failUnless(isinstance(readv_data, dict)) self.failUnlessEqual(len(readv_data), 0) + def test_enumerate_mutable_shares(self): + """ + ``StorageServer.enumerate_mutable_shares()`` returns a set of share + numbers for the given storage index, or an empty set if it does not + exist at all. + """ + ss = self.create("test_enumerate_mutable_shares") + + # Initially, nothing exists: + empty = ss.enumerate_mutable_shares(b"si1") + + self.allocate(ss, b"si1", b"we1", b"le1", [0, 1, 4, 2], 12) + shares0_1_2_4 = ss.enumerate_mutable_shares(b"si1") + + # Remove share 2, by setting size to 0: + secrets = (self.write_enabler(b"we1"), + self.renew_secret(b"le1"), + self.cancel_secret(b"le1")) + ss.slot_testv_and_readv_and_writev(b"si1", secrets, {2: ([], [], 0)}, []) + shares0_1_4 = ss.enumerate_mutable_shares(b"si1") + self.assertEqual( + (empty, shares0_1_2_4, shares0_1_4), + (set(), {0, 1, 2, 4}, {0, 1, 4}) + ) + + def test_mutable_share_length(self): + """``get_mutable_share_length()`` returns the length of the share.""" + ss = self.create("test_mutable_share_length") + self.allocate(ss, b"si1", b"we1", b"le1", [16], 23) + ss.slot_testv_and_readv_and_writev( + b"si1", (self.write_enabler(b"we1"), + self.renew_secret(b"le1"), + self.cancel_secret(b"le1")), + {16: ([], [(0, b"x" * 23)], None)}, + [] + ) + self.assertEqual(ss.get_mutable_share_length(b"si1", 16), 23) + + def test_mutable_share_length_unknown(self): + """ + ``get_mutable_share_length()`` raises a ``KeyError`` on unknown shares. + """ + ss = self.create("test_mutable_share_length_unknown") + self.allocate(ss, b"si1", b"we1", b"le1", [16], 23) + ss.slot_testv_and_readv_and_writev( + b"si1", (self.write_enabler(b"we1"), + self.renew_secret(b"le1"), + self.cancel_secret(b"le1")), + {16: ([], [(0, b"x" * 23)], None)}, + [] + ) + with self.assertRaises(KeyError): + # Wrong share number. 
+ ss.get_mutable_share_length(b"si1", 17) + with self.assertRaises(KeyError): + # Wrong storage index + ss.get_mutable_share_length(b"unknown", 16) + def test_bad_magic(self): ss = self.create("test_bad_magic") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0]), 10) @@ -994,18 +1394,20 @@ class MutableServer(unittest.TestCase): f.seek(0) f.write(b"BAD MAGIC") f.close() - read = ss.remote_slot_readv + read = ss.slot_readv e = self.failUnlessRaises(UnknownMutableContainerVersionError, read, b"si1", [0], [(0,10)]) - self.failUnlessIn(" had magic ", str(e)) - self.failUnlessIn(" but we wanted ", str(e)) + self.assertEqual(e.filename, fn) + self.assertTrue(e.version.startswith(b"BAD MAGIC")) + self.assertIn("had unexpected version", str(e)) + self.assertIn("BAD MAGIC", str(e)) def test_container_size(self): ss = self.create("test_container_size") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) - read = ss.remote_slot_readv - rstaraw = ss.remote_slot_testv_and_readv_and_writev + read = ss.slot_readv + rstaraw = ss.slot_testv_and_readv_and_writev secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) @@ -1085,7 +1487,7 @@ class MutableServer(unittest.TestCase): # Also see if the server explicitly declares that it supports this # feature. - ver = ss.remote_get_version() + ver = ss.get_version() storage_v1_ver = ver[b"http://allmydata.org/tahoe/protocols/storage/v1"] self.failUnless(storage_v1_ver.get(b"fills-holes-with-zero-bytes")) @@ -1103,7 +1505,7 @@ class MutableServer(unittest.TestCase): self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) - read = ss.remote_slot_readv + read = ss.slot_readv self.failUnlessEqual(read(b"si1", [0], [(0, 10)]), {0: [b""]}) self.failUnlessEqual(read(b"si1", [], [(0, 10)]), @@ -1116,7 +1518,7 @@ class MutableServer(unittest.TestCase): self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) - write = ss.remote_slot_testv_and_readv_and_writev + write = ss.slot_testv_and_readv_and_writev answer = write(b"si1", secrets, {0: ([], [(0,data)], None)}, []) @@ -1126,7 +1528,7 @@ class MutableServer(unittest.TestCase): {0: [b"00000000001111111111"]}) self.failUnlessEqual(read(b"si1", [0], [(95,10)]), {0: [b"99999"]}) - #self.failUnlessEqual(s0.remote_get_length(), 100) + #self.failUnlessEqual(s0.get_length(), 100) bad_secrets = (b"bad write enabler", secrets[1], secrets[2]) f = self.failUnlessRaises(BadWriteEnablerError, @@ -1160,8 +1562,8 @@ class MutableServer(unittest.TestCase): self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) - write = ss.remote_slot_testv_and_readv_and_writev - read = ss.remote_slot_readv + write = ss.slot_testv_and_readv_and_writev + read = ss.slot_readv def reset(): write(b"si1", secrets, @@ -1205,8 +1607,8 @@ class MutableServer(unittest.TestCase): self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) - write = ss.remote_slot_testv_and_readv_and_writev - read = ss.remote_slot_readv + write = ss.slot_testv_and_readv_and_writev + read = ss.slot_readv data = [(b"%d" % i) * 100 for i in range(3)] rc = write(b"si1", secrets, {0: ([], [(0,data[0])], None), @@ -1221,25 +1623,25 @@ class MutableServer(unittest.TestCase): 2: [b"2"*10]}) def compare_leases_without_timestamps(self, leases_a, leases_b): - self.failUnlessEqual(len(leases_a), len(leases_b)) - for i in 
range(len(leases_a)): - a = leases_a[i] - b = leases_b[i] - self.failUnlessEqual(a.owner_num, b.owner_num) - self.failUnlessEqual(a.renew_secret, b.renew_secret) - self.failUnlessEqual(a.cancel_secret, b.cancel_secret) - self.failUnlessEqual(a.nodeid, b.nodeid) - - def compare_leases(self, leases_a, leases_b): - self.failUnlessEqual(len(leases_a), len(leases_b)) - for i in range(len(leases_a)): - a = leases_a[i] - b = leases_b[i] - self.failUnlessEqual(a.owner_num, b.owner_num) - self.failUnlessEqual(a.renew_secret, b.renew_secret) - self.failUnlessEqual(a.cancel_secret, b.cancel_secret) - self.failUnlessEqual(a.nodeid, b.nodeid) - self.failUnlessEqual(a.expiration_time, b.expiration_time) + """ + Assert that, except for expiration times, ``leases_a`` contains the same + lease information as ``leases_b``. + """ + for a, b in zip(leases_a, leases_b): + # The leases aren't always of the same type (though of course + # corresponding elements in the two lists should be of the same + # type as each other) so it's inconvenient to just reach in and + # normalize the expiration timestamp. We don't want to call + # `renew` on both objects to normalize the expiration timestamp in + # case `renew` is broken and gives us back equal outputs from + # non-equal inputs (expiration timestamp aside). It seems + # reasonably safe to use `renew` to make _one_ of the timestamps + # equal to the other though. + self.assertEqual( + a.renew(b.get_expiration_time()), + b, + ) + self.assertEqual(len(leases_a), len(leases_b)) def test_leases(self): ss = self.create("test_leases") @@ -1248,8 +1650,8 @@ class MutableServer(unittest.TestCase): self.renew_secret(b"we1-%d" % n), self.cancel_secret(b"we1-%d" % n) ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) - write = ss.remote_slot_testv_and_readv_and_writev - read = ss.remote_slot_readv + write = ss.slot_testv_and_readv_and_writev + read = ss.slot_readv rc = write(b"si1", secrets(0), {0: ([], [(0,data)], None)}, []) self.failUnlessEqual(rc, (True, {})) @@ -1265,7 +1667,7 @@ class MutableServer(unittest.TestCase): self.failUnlessEqual(len(list(s0.get_leases())), 1) # add-lease on a missing storage index is silently ignored - self.failUnlessEqual(ss.remote_add_lease(b"si18", b"", b""), None) + self.failUnlessEqual(ss.add_lease(b"si18", b"", b""), None) # re-allocate the slots and use the same secrets, that should update # the lease @@ -1273,7 +1675,7 @@ class MutableServer(unittest.TestCase): self.failUnlessEqual(len(list(s0.get_leases())), 1) # renew it directly - ss.remote_renew_lease(b"si1", secrets(0)[1]) + ss.renew_lease(b"si1", secrets(0)[1]) self.failUnlessEqual(len(list(s0.get_leases())), 1) # now allocate them with a bunch of different secrets, to trigger the @@ -1281,7 +1683,7 @@ class MutableServer(unittest.TestCase): write(b"si1", secrets(1), {0: ([], [(0,data)], None)}, []) self.failUnlessEqual(len(list(s0.get_leases())), 2) secrets2 = secrets(2) - ss.remote_add_lease(b"si1", secrets2[1], secrets2[2]) + ss.add_lease(b"si1", secrets2[1], secrets2[2]) self.failUnlessEqual(len(list(s0.get_leases())), 3) write(b"si1", secrets(3), {0: ([], [(0,data)], None)}, []) write(b"si1", secrets(4), {0: ([], [(0,data)], None)}, []) @@ -1299,11 +1701,11 @@ class MutableServer(unittest.TestCase): # read back the leases, make sure they're still intact. 
self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) - ss.remote_renew_lease(b"si1", secrets(0)[1]) - ss.remote_renew_lease(b"si1", secrets(1)[1]) - ss.remote_renew_lease(b"si1", secrets(2)[1]) - ss.remote_renew_lease(b"si1", secrets(3)[1]) - ss.remote_renew_lease(b"si1", secrets(4)[1]) + ss.renew_lease(b"si1", secrets(0)[1]) + ss.renew_lease(b"si1", secrets(1)[1]) + ss.renew_lease(b"si1", secrets(2)[1]) + ss.renew_lease(b"si1", secrets(3)[1]) + ss.renew_lease(b"si1", secrets(4)[1]) self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) # get a new copy of the leases, with the current timestamps. Reading # data and failing to renew/cancel leases should leave the timestamps @@ -1314,18 +1716,18 @@ class MutableServer(unittest.TestCase): # examine the exception thus raised, make sure the old nodeid is # present, to provide for share migration e = self.failUnlessRaises(IndexError, - ss.remote_renew_lease, b"si1", + ss.renew_lease, b"si1", secrets(20)[1]) e_s = str(e) self.failUnlessIn("Unable to renew non-existent lease", e_s) self.failUnlessIn("I have leases accepted by nodeids:", e_s) self.failUnlessIn("nodeids: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' .", e_s) - self.compare_leases(all_leases, list(s0.get_leases())) + self.assertEqual(all_leases, list(s0.get_leases())) # reading shares should not modify the timestamp read(b"si1", [], [(0,200)]) - self.compare_leases(all_leases, list(s0.get_leases())) + self.assertEqual(all_leases, list(s0.get_leases())) write(b"si1", secrets(0), {0: ([], [(200, b"make me bigger")], None)}, []) @@ -1343,13 +1745,13 @@ class MutableServer(unittest.TestCase): clock = Clock() clock.advance(235) ss = self.create("test_mutable_add_lease_renews", - get_current_time=clock.seconds) + clock=clock) def secrets(n): return ( self.write_enabler(b"we1"), self.renew_secret(b"we1-%d" % n), self.cancel_secret(b"we1-%d" % n) ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) - write = ss.remote_slot_testv_and_readv_and_writev + write = ss.slot_testv_and_readv_and_writev write_enabler, renew_secret, cancel_secret = secrets(0) rc = write(b"si1", (write_enabler, renew_secret, cancel_secret), {0: ([], [(0,data)], None)}, []) @@ -1359,23 +1761,23 @@ class MutableServer(unittest.TestCase): "shares", storage_index_to_dir(b"si1")) s0 = MutableShareFile(os.path.join(bucket_dir, "0")) [lease] = s0.get_leases() - self.assertEqual(lease.expiration_time, 235 + DEFAULT_RENEWAL_TIME) + self.assertEqual(lease.get_expiration_time(), 235 + DEFAULT_RENEWAL_TIME) # Time passes... clock.advance(835) # Adding a lease renews it: - ss.remote_add_lease(b"si1", renew_secret, cancel_secret) + ss.add_lease(b"si1", renew_secret, cancel_secret) [lease] = s0.get_leases() - self.assertEqual(lease.expiration_time, + self.assertEqual(lease.get_expiration_time(), 235 + 835 + DEFAULT_RENEWAL_TIME) def test_remove(self): ss = self.create("test_remove") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) - readv = ss.remote_slot_readv - writev = ss.remote_slot_testv_and_readv_and_writev + readv = ss.slot_readv + writev = ss.slot_testv_and_readv_and_writev secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) @@ -1479,7 +1881,7 @@ class MutableServer(unittest.TestCase): # We don't even need to create any shares to exercise this # functionality. Just go straight to sending a truncate-to-zero # write. 
- testv_is_good, read_data = ss.remote_slot_testv_and_readv_and_writev( + testv_is_good, read_data = ss.slot_testv_and_readv_and_writev( storage_index=storage_index, secrets=secrets, test_and_write_vectors={ @@ -1497,7 +1899,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): self.sparent = LoggingServiceParent() self._lease_secret = itertools.count() self.ss = self.create("MDMFProxies storage test server") - self.rref = RemoteBucket(self.ss) + self.rref = RemoteBucket(FoolscapStorageServer(self.ss)) self.storage_server = _StorageServer(lambda: self.rref) self.secrets = (self.write_enabler(b"we_secret"), self.renew_secret(b"renew_secret"), @@ -1664,7 +2066,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): If tail_segment=True, then I will write a share that has a smaller tail segment than other segments. """ - write = self.ss.remote_slot_testv_and_readv_and_writev + write = self.ss.slot_testv_and_readv_and_writev data = self.build_test_mdmf_share(tail_segment, empty) # Finally, we write the whole thing to the storage server in one # pass. @@ -1732,7 +2134,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): empty=False): # Some tests need SDMF shares to verify that we can still # read them. This method writes one, which resembles but is not - write = self.ss.remote_slot_testv_and_readv_and_writev + write = self.ss.slot_testv_and_readv_and_writev share = self.build_test_sdmf_share(empty) testvs = [(0, 1, b"eq", b"")] tws = {} @@ -2064,7 +2466,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): # blocks. mw = self._make_new_mw(b"si1", 0) # Test writing some blocks. - read = self.ss.remote_slot_readv + read = self.ss.slot_readv expected_private_key_offset = struct.calcsize(MDMFHEADER) expected_sharedata_offset = struct.calcsize(MDMFHEADER) + \ PRIVATE_KEY_SIZE + \ @@ -2855,7 +3257,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): d = sdmfr.finish_publishing() def _then(ignored): self.failUnlessEqual(self.rref.write_count, 1) - read = self.ss.remote_slot_readv + read = self.ss.slot_readv self.failUnlessEqual(read(b"si1", [0], [(0, len(data))]), {0: [data]}) d.addCallback(_then) @@ -2912,7 +3314,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): sdmfw.finish_publishing()) def _then_again(results): self.failUnless(results[0]) - read = self.ss.remote_slot_readv + read = self.ss.slot_readv self.failUnlessEqual(read(b"si1", [0], [(1, 8)]), {0: [struct.pack(">Q", 1)]}) self.failUnlessEqual(read(b"si1", [0], [(9, len(data) - 9)]), @@ -3005,37 +3407,342 @@ class Stats(unittest.TestCase): self.failUnless(output["get"]["99_0_percentile"] is None, output) self.failUnless(output["get"]["99_9_percentile"] is None, output) +immutable_schemas = strategies.sampled_from(list(ALL_IMMUTABLE_SCHEMAS)) class ShareFileTests(unittest.TestCase): """Tests for allmydata.storage.immutable.ShareFile.""" - def get_sharefile(self): - sf = ShareFile(self.mktemp(), max_size=1000, create=True) + def get_sharefile(self, **kwargs): + sf = ShareFile(self.mktemp(), max_size=1000, create=True, **kwargs) sf.write_share_data(0, b"abc") sf.write_share_data(2, b"DEF") # Should be b'abDEF' now. 
return sf - def test_read_write(self): + @given(immutable_schemas) + def test_read_write(self, schema): """Basic writes can be read.""" - sf = self.get_sharefile() + sf = self.get_sharefile(schema=schema) self.assertEqual(sf.read_share_data(0, 3), b"abD") self.assertEqual(sf.read_share_data(1, 4), b"bDEF") - def test_reads_beyond_file_end(self): + @given(immutable_schemas) + def test_reads_beyond_file_end(self, schema): """Reads beyond the file size are truncated.""" - sf = self.get_sharefile() + sf = self.get_sharefile(schema=schema) self.assertEqual(sf.read_share_data(0, 10), b"abDEF") self.assertEqual(sf.read_share_data(5, 10), b"") - def test_too_large_write(self): + @given(immutable_schemas) + def test_too_large_write(self, schema): """Can't do write larger than file size.""" - sf = self.get_sharefile() + sf = self.get_sharefile(schema=schema) with self.assertRaises(DataTooLargeError): sf.write_share_data(0, b"x" * 3000) - def test_no_leases_cancelled(self): + @given(immutable_schemas) + def test_no_leases_cancelled(self, schema): """If no leases were cancelled, IndexError is raised.""" - sf = self.get_sharefile() + sf = self.get_sharefile(schema=schema) with self.assertRaises(IndexError): sf.cancel_lease(b"garbage") + + @given(immutable_schemas) + def test_long_lease_count_format(self, schema): + """ + ``ShareFile.__init__`` raises ``ValueError`` if the lease count format + given is longer than one character. + """ + with self.assertRaises(ValueError): + self.get_sharefile(schema=schema, lease_count_format="BB") + + @given(immutable_schemas) + def test_large_lease_count_format(self, schema): + """ + ``ShareFile.__init__`` raises ``ValueError`` if the lease count format + encodes to a size larger than 8 bytes. + """ + with self.assertRaises(ValueError): + self.get_sharefile(schema=schema, lease_count_format="Q") + + @given(immutable_schemas) + def test_avoid_lease_overflow(self, schema): + """ + If the share file already has the maximum number of leases supported then + ``ShareFile.add_lease`` raises ``struct.error`` and makes no changes + to the share file contents. + """ + make_lease = partial( + LeaseInfo, + renew_secret=b"r" * 32, + cancel_secret=b"c" * 32, + expiration_time=2 ** 31, + ) + # Make it a little easier to reach the condition by limiting the + # number of leases to only 255. + sf = self.get_sharefile(schema=schema, lease_count_format="B") + + # Add the leases. + for i in range(2 ** 8 - 1): + lease = make_lease(owner_num=i) + sf.add_lease(lease) + + # Capture the state of the share file at this point so we can + # determine whether the next operation modifies it or not. + with open(sf.home, "rb") as f: + before_data = f.read() + + # It is not possible to add a 256th lease. + lease = make_lease(owner_num=256) + with self.assertRaises(struct.error): + sf.add_lease(lease) + + # Compare the share file state to what we captured earlier. Any + # change is a bug. + with open(sf.home, "rb") as f: + after_data = f.read() + + self.assertEqual(before_data, after_data) + + @given(immutable_schemas) + def test_renew_secret(self, schema): + """ + A lease loaded from an immutable share file at any schema version can have + its renew secret verified. 
+ """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + expiration_time = 2 ** 31 + + sf = self.get_sharefile(schema=schema) + lease = LeaseInfo( + owner_num=0, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + expiration_time=expiration_time, + ) + sf.add_lease(lease) + (loaded_lease,) = sf.get_leases() + self.assertTrue(loaded_lease.is_renew_secret(renew_secret)) + + @given(immutable_schemas) + def test_cancel_secret(self, schema): + """ + A lease loaded from an immutable share file at any schema version can have + its cancel secret verified. + """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + expiration_time = 2 ** 31 + + sf = self.get_sharefile(schema=schema) + lease = LeaseInfo( + owner_num=0, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + expiration_time=expiration_time, + ) + sf.add_lease(lease) + (loaded_lease,) = sf.get_leases() + self.assertTrue(loaded_lease.is_cancel_secret(cancel_secret)) + +mutable_schemas = strategies.sampled_from(list(ALL_MUTABLE_SCHEMAS)) + +class MutableShareFileTests(unittest.TestCase): + """ + Tests for allmydata.storage.mutable.MutableShareFile. + """ + def get_sharefile(self, **kwargs): + return MutableShareFile(self.mktemp(), **kwargs) + + @given( + schema=mutable_schemas, + nodeid=strategies.just(b"x" * 20), + write_enabler=strategies.just(b"y" * 32), + datav=strategies.lists( + # Limit the max size of these so we don't write *crazy* amounts of + # data to disk. + strategies.tuples(offsets(), strategies.binary(max_size=2 ** 8)), + max_size=2 ** 8, + ), + new_length=offsets(), + ) + def test_readv_reads_share_data(self, schema, nodeid, write_enabler, datav, new_length): + """ + ``MutableShareFile.readv`` returns bytes from the share data portion + of the share file. + """ + sf = self.get_sharefile(schema=schema) + sf.create(my_nodeid=nodeid, write_enabler=write_enabler) + sf.writev(datav=datav, new_length=new_length) + + # Apply all of the writes to a simple in-memory buffer so we can + # resolve the final state of the share data. In particular, this + # helps deal with overlapping writes which otherwise make it tricky to + # figure out what data to expect to be able to read back. + buf = BytesIO() + for (offset, data) in datav: + buf.seek(offset) + buf.write(data) + buf.truncate(new_length) + + # Using that buffer, determine the expected result of a readv for all + # of the data just written. + def read_from_buf(offset, length): + buf.seek(offset) + return buf.read(length) + expected_data = list( + read_from_buf(offset, len(data)) + for (offset, data) + in datav + ) + + # Perform a read that gives back all of the data written to the share + # file. + read_vectors = list((offset, len(data)) for (offset, data) in datav) + read_data = sf.readv(read_vectors) + + # Make sure the read reproduces the value we computed using our local + # buffer. + self.assertEqual(expected_data, read_data) + + @given( + schema=mutable_schemas, + nodeid=strategies.just(b"x" * 20), + write_enabler=strategies.just(b"y" * 32), + readv=strategies.lists(strategies.tuples(offsets(), lengths()), min_size=1), + random=strategies.randoms(), + ) + def test_readv_rejects_negative_length(self, schema, nodeid, write_enabler, readv, random): + """ + If a negative length is given to ``MutableShareFile.readv`` in a read + vector then ``AssertionError`` is raised. 
+ """ + # Pick a read vector to break with a negative value + readv_index = random.randrange(len(readv)) + # Decide on whether we're breaking offset or length + offset_or_length = random.randrange(2) + + # A helper function that will take a valid offset and length and break + # one of them. + def corrupt(break_length, offset, length): + if break_length: + # length must not be 0 or flipping the sign does nothing + # length must not be negative or flipping the sign *fixes* it + assert length > 0 + return (offset, -length) + else: + if offset > 0: + # We can break offset just by flipping the sign. + return (-offset, length) + else: + # Otherwise it has to be zero. If it was negative, what's + # going on? + assert offset == 0 + # Since we can't just flip the sign on 0 to break things, + # replace a 0 offset with a simple negative value. All + # other negative values will be tested by the `offset > 0` + # case above. + return (-1, length) + + # Break the read vector very slightly! + broken_readv = readv[:] + broken_readv[readv_index] = corrupt( + offset_or_length, + *broken_readv[readv_index] + ) + + sf = self.get_sharefile(schema=schema) + sf.create(my_nodeid=nodeid, write_enabler=write_enabler) + + # A read with a broken read vector is an error. + with self.assertRaises(AssertionError): + sf.readv(broken_readv) + + +class LeaseInfoTests(SyncTestCase): + """ + Tests for ``allmydata.storage.lease.LeaseInfo``. + """ + def test_is_renew_secret(self): + """ + ``LeaseInfo.is_renew_secret`` returns ``True`` if the value given is the + renew secret. + """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + lease = LeaseInfo( + owner_num=1, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + ) + self.assertTrue(lease.is_renew_secret(renew_secret)) + + def test_is_not_renew_secret(self): + """ + ``LeaseInfo.is_renew_secret`` returns ``False`` if the value given is not + the renew secret. + """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + lease = LeaseInfo( + owner_num=1, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + ) + self.assertFalse(lease.is_renew_secret(cancel_secret)) + + def test_is_cancel_secret(self): + """ + ``LeaseInfo.is_cancel_secret`` returns ``True`` if the value given is the + cancel secret. + """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + lease = LeaseInfo( + owner_num=1, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + ) + self.assertTrue(lease.is_cancel_secret(cancel_secret)) + + def test_is_not_cancel_secret(self): + """ + ``LeaseInfo.is_cancel_secret`` returns ``False`` if the value given is not + the cancel secret. + """ + renew_secret = b"r" * 32 + cancel_secret = b"c" * 32 + lease = LeaseInfo( + owner_num=1, + renew_secret=renew_secret, + cancel_secret=cancel_secret, + ) + self.assertFalse(lease.is_cancel_secret(renew_secret)) + + @given( + strategies.tuples( + strategies.integers(min_value=0, max_value=2 ** 31 - 1), + strategies.binary(min_size=32, max_size=32), + strategies.binary(min_size=32, max_size=32), + strategies.integers(min_value=0, max_value=2 ** 31 - 1), + strategies.binary(min_size=20, max_size=20), + ), + ) + def test_immutable_size(self, initializer_args): + """ + ``LeaseInfo.immutable_size`` returns the length of the result of + ``LeaseInfo.to_immutable_data``. + + ``LeaseInfo.mutable_size`` returns the length of the result of + ``LeaseInfo.to_mutable_data``. 
+ """ + info = LeaseInfo(*initializer_args) + self.expectThat( + info.to_immutable_data(), + HasLength(info.immutable_size()), + ) + self.expectThat( + info.to_mutable_data(), + HasLength(info.mutable_size()), + ) diff --git a/src/allmydata/test/test_storage_http.py b/src/allmydata/test/test_storage_http.py new file mode 100644 index 000000000..8dbe18545 --- /dev/null +++ b/src/allmydata/test/test_storage_http.py @@ -0,0 +1,1632 @@ +""" +Tests for HTTP storage client + server. + +The tests here are synchronous and don't involve running a real reactor. This +works, but has some caveats when it comes to testing HTTP endpoints: + +* Some HTTP endpoints are synchronous, some are not. +* For synchronous endpoints, the result is immediately available on the + ``Deferred`` coming out of ``StubTreq``. +* For asynchronous endpoints, you need to use ``StubTreq.flush()`` and + iterate the fake in-memory clock/reactor to advance time . + +So for HTTP endpoints, you should use ``HttpTestFixture.result_of_with_flush()`` +which handles both, and patches and moves forward the global Twisted +``Cooperator`` since that is used to drive pull producers. This is, +sadly, an internal implementation detail of Twisted being leaked to tests... + +For definitely synchronous calls, you can just use ``result_of()``. +""" + +from base64 import b64encode +from contextlib import contextmanager +from os import urandom +from typing import Union, Callable, Tuple, Iterable +from cbor2 import dumps +from pycddl import ValidationError as CDDLValidationError +from hypothesis import assume, given, strategies as st +from fixtures import Fixture, TempDir, MonkeyPatch +from treq.testing import StubTreq +from klein import Klein +from hyperlink import DecodedURL +from collections_extended import RangeMap +from twisted.internet.task import Clock, Cooperator +from twisted.internet.interfaces import IReactorTime +from twisted.internet.defer import CancelledError, Deferred +from twisted.web import http +from twisted.web.http_headers import Headers +from werkzeug import routing +from werkzeug.exceptions import NotFound as WNotFound + +from .common import SyncTestCase +from ..storage.http_common import get_content_type, CBOR_MIME_TYPE +from ..storage.common import si_b2a +from ..storage.lease import LeaseInfo +from ..storage.server import StorageServer +from ..storage.http_server import ( + HTTPServer, + _extract_secrets, + Secrets, + ClientSecretsException, + _authorized_route, + StorageIndexConverter, +) +from ..storage.http_client import ( + StorageClient, + ClientException, + StorageClientImmutables, + ImmutableCreateResult, + UploadProgress, + StorageClientGeneral, + _encode_si, + StorageClientMutables, + TestWriteVectors, + WriteVector, + ReadVector, + ReadTestWriteResult, + TestVector, + limited_content, +) + + +class HTTPUtilities(SyncTestCase): + """Tests for HTTP common utilities.""" + + def test_get_content_type(self): + """``get_content_type()`` extracts the content-type from the header.""" + + def assert_header_values_result(values, expected_content_type): + headers = Headers() + if values: + headers.setRawHeaders("Content-Type", values) + content_type = get_content_type(headers) + self.assertEqual(content_type, expected_content_type) + + assert_header_values_result(["text/html"], "text/html") + assert_header_values_result([], None) + assert_header_values_result(["text/plain", "application/json"], "text/plain") + assert_header_values_result(["text/html;encoding=utf-8"], "text/html") + + +def _post_process(params): + 
secret_types, secrets = params
+    secrets = {t: s for (t, s) in zip(secret_types, secrets)}
+    headers = [
+        "{} {}".format(
+            secret_type.value, str(b64encode(secrets[secret_type]), "ascii").strip()
+        )
+        for secret_type in secret_types
+    ]
+    return secrets, headers
+
+
+# Creates a tuple of ({Secret enum value: secret_bytes}, [http headers with secrets]).
+SECRETS_STRATEGY = (
+    st.sets(st.sampled_from(Secrets))
+    .flatmap(
+        lambda secret_types: st.tuples(
+            st.just(secret_types),
+            st.lists(
+                st.binary(min_size=32, max_size=32),
+                min_size=len(secret_types),
+                max_size=len(secret_types),
+            ),
+        )
+    )
+    .map(_post_process)
+)
+
+
+class ExtractSecretsTests(SyncTestCase):
+    """
+    Tests for ``_extract_secrets``.
+    """
+
+    @given(secrets_to_send=SECRETS_STRATEGY)
+    def test_extract_secrets(self, secrets_to_send):
+        """
+        ``_extract_secrets()`` returns a dictionary with the extracted secrets
+        if the input secrets match the required secrets.
+        """
+        secrets, headers = secrets_to_send
+
+        # The secrets sent in the headers are extracted and returned:
+        self.assertEqual(_extract_secrets(headers, secrets.keys()), secrets)
+
+    @given(
+        secrets_to_send=SECRETS_STRATEGY,
+        secrets_to_require=st.sets(st.sampled_from(Secrets)),
+    )
+    def test_wrong_number_of_secrets(self, secrets_to_send, secrets_to_require):
+        """
+        If the wrong number of secrets are passed to ``_extract_secrets``, a
+        ``ClientSecretsException`` is raised.
+        """
+        secrets_to_send, headers = secrets_to_send
+        assume(secrets_to_send.keys() != secrets_to_require)
+
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(headers, secrets_to_require)
+
+    def test_bad_secret_missing_value(self):
+        """
+        A missing value in ``_extract_secrets`` results in
+        ``ClientSecretsException``.
+        """
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(["lease-renew-secret"], {Secrets.LEASE_RENEW})
+
+    def test_bad_secret_unknown_prefix(self):
+        """
+        An unknown secret prefix in ``_extract_secrets`` results in
+        ``ClientSecretsException``.
+        """
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(["FOO eA=="], {})
+
+    def test_bad_secret_not_base64(self):
+        """
+        A non-base64 value in ``_extract_secrets`` results in
+        ``ClientSecretsException``.
+        """
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(["lease-renew-secret x"], {Secrets.LEASE_RENEW})
+
+    def test_bad_secret_wrong_length_lease_renew(self):
+        """
+        Lease renewal secrets must be 32 bytes long.
+        """
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(["lease-renew-secret eA=="], {Secrets.LEASE_RENEW})
+
+    def test_bad_secret_wrong_length_lease_cancel(self):
+        """
+        Lease cancel secrets must be 32 bytes long.
+        """
+        with self.assertRaises(ClientSecretsException):
+            _extract_secrets(["lease-cancel-secret eA=="], {Secrets.LEASE_CANCEL})
+
+
+class RouteConverterTests(SyncTestCase):
+    """Tests for custom werkzeug path segment converters."""
+
+    adapter = routing.Map(
+        [
+            routing.Rule(
+                "/<storage_index:storage_index>/", endpoint="si", methods=["GET"]
+            )
+        ],
+        converters={"storage_index": StorageIndexConverter},
+    ).bind("example.com", "/")
+
+    @given(storage_index=st.binary(min_size=16, max_size=16))
+    def test_good_storage_index_is_parsed(self, storage_index):
+        """
+        A valid storage index is accepted and parsed back out by
+        StorageIndexConverter.
+        """
+        self.assertEqual(
+            self.adapter.match(
+                "/{}/".format(str(si_b2a(storage_index), "ascii")), method="GET"
+            ),
+            ("si", {"storage_index": storage_index}),
+        )
+
+    def test_long_storage_index_is_not_parsed(self):
+        """An overly long storage_index string is not parsed."""
+        with self.assertRaises(WNotFound):
+            self.adapter.match("/{}/".format("a" * 27), method="GET")
+
+    def test_short_storage_index_is_not_parsed(self):
+        """An overly short storage_index string is not parsed."""
+        with self.assertRaises(WNotFound):
+            self.adapter.match("/{}/".format("a" * 25), method="GET")
+
+    def test_bad_characters_storage_index_is_not_parsed(self):
+        """A storage_index string with bad characters is not parsed."""
+        with self.assertRaises(WNotFound):
+            self.adapter.match("/{}_/".format("a" * 25), method="GET")
+
+    def test_invalid_storage_index_is_not_parsed(self):
+        """An invalid storage_index string is not parsed."""
+        with self.assertRaises(WNotFound):
+            self.adapter.match("/nomd2a65ylxjbqzsw7gcfh4ivr/", method="GET")
+
+
+# TODO should be actual swissnum
+SWISSNUM_FOR_TEST = b"abcd"
+
+
+def gen_bytes(length: int) -> bytes:
+    """Generate bytes to the given length."""
+    result = (b"0123456789abcdef" * ((length // 16) + 1))[:length]
+    assert len(result) == length
+    return result
+
+
+class TestApp(object):
+    """HTTP API for testing purposes."""
+
+    clock: IReactorTime
+    _app = Klein()
+    _swissnum = SWISSNUM_FOR_TEST  # Match what the test client is using
+
+    @_authorized_route(_app, {Secrets.UPLOAD}, "/upload_secret", methods=["GET"])
+    def validate_upload_secret(self, request, authorization):
+        if authorization == {Secrets.UPLOAD: b"MAGIC"}:
+            return "GOOD SECRET"
+        else:
+            return "BAD: {}".format(authorization)
+
+    @_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"])
+    def bad_version(self, request, authorization):
+        """Return version result that violates the expected schema."""
+        request.setHeader("content-type", CBOR_MIME_TYPE)
+        return dumps({"garbage": 123})
+
+    @_authorized_route(_app, set(), "/bytes/<int:length>", methods=["GET"])
+    def generate_bytes(self, request, authorization, length):
+        """Return bytes to the given length using ``gen_bytes()``."""
+        return gen_bytes(length)
+
+    @_authorized_route(_app, set(), "/slowly_never_finish_result", methods=["GET"])
+    def slowly_never_finish_result(self, request, authorization):
+        """
+        Send data immediately, after 59 seconds, after another 59 seconds, and then
+        never again, without finishing the response.
+        """
+        request.write(b"a")
+        self.clock.callLater(59, request.write, b"b")
+        self.clock.callLater(59 + 59, request.write, b"c")
+        return Deferred()
+
+    @_authorized_route(_app, set(), "/die_unfinished", methods=["GET"])
+    def die(self, request, authorization):
+        """
+        Dies half-way.
+        """
+        request.transport.loseConnection()
+        return Deferred()
+
+
+def result_of(d):
+    """
+    Synchronously extract the result of a Deferred.
+    """
+    result = []
+    error = []
+    d.addCallbacks(result.append, error.append)
+    if result:
+        return result[0]
+    if error:
+        error[0].raiseException()
+    raise RuntimeError(
+        "We expected given Deferred to have result already, but it wasn't. "
+        + "This is probably a test design issue."
+    )
+
+
+class CustomHTTPServerTests(SyncTestCase):
+    """
+    Tests that use a custom HTTP server.
+ """ + + def setUp(self): + super(CustomHTTPServerTests, self).setUp() + StorageClient.start_test_mode( + lambda pool: self.addCleanup(pool.closeCachedConnections) + ) + self.addCleanup(StorageClient.stop_test_mode) + # Could be a fixture, but will only be used in this test class so not + # going to bother: + self._http_server = TestApp() + treq = StubTreq(self._http_server._app.resource()) + self.client = StorageClient( + DecodedURL.from_text("http://127.0.0.1"), + SWISSNUM_FOR_TEST, + treq=treq, + # We're using a Treq private API to get the reactor, alas, but only + # in a test, so not going to worry about it too much. This would be + # fixed if https://github.com/twisted/treq/issues/226 were ever + # fixed. + clock=treq._agent._memoryReactor, + ) + self._http_server.clock = self.client._clock + + def test_authorization_enforcement(self): + """ + The requirement for secrets is enforced by the ``_authorized_route`` + decorator; if they are not given, a 400 response code is returned. + """ + # Without secret, get a 400 error. + response = result_of( + self.client.request( + "GET", + "http://127.0.0.1/upload_secret", + ) + ) + self.assertEqual(response.code, 400) + + # With secret, we're good. + response = result_of( + self.client.request( + "GET", "http://127.0.0.1/upload_secret", upload_secret=b"MAGIC" + ) + ) + self.assertEqual(response.code, 200) + self.assertEqual(result_of(response.content()), b"GOOD SECRET") + + def test_client_side_schema_validation(self): + """ + The client validates returned CBOR message against a schema. + """ + client = StorageClientGeneral(self.client) + with self.assertRaises(CDDLValidationError): + result_of(client.get_version()) + + @given(length=st.integers(min_value=1, max_value=1_000_000)) + def test_limited_content_fits(self, length): + """ + ``http_client.limited_content()`` returns the body if it is less than + the max length. + """ + for at_least_length in (length, length + 1, length + 1000, length + 100_000): + response = result_of( + self.client.request( + "GET", + f"http://127.0.0.1/bytes/{length}", + ) + ) + + self.assertEqual( + result_of( + limited_content(response, self._http_server.clock, at_least_length) + ).read(), + gen_bytes(length), + ) + + @given(length=st.integers(min_value=10, max_value=1_000_000)) + def test_limited_content_does_not_fit(self, length): + """ + If the body is longer than than max length, + ``http_client.limited_content()`` fails with a ``ValueError``. + """ + for too_short in (length - 1, 5): + response = result_of( + self.client.request( + "GET", + f"http://127.0.0.1/bytes/{length}", + ) + ) + + with self.assertRaises(ValueError): + result_of(limited_content(response, self._http_server.clock, too_short)) + + def test_limited_content_silence_causes_timeout(self): + """ + ``http_client.limited_content() times out if it receives no data for 60 + seconds. + """ + response = result_of( + self.client.request( + "GET", + "http://127.0.0.1/slowly_never_finish_result", + ) + ) + + body_deferred = limited_content(response, self._http_server.clock, 4) + result = [] + error = [] + body_deferred.addCallbacks(result.append, error.append) + + for i in range(59 + 59 + 60): + self.assertEqual((result, error), ([], [])) + self._http_server.clock.advance(1) + # Push data between in-memory client and in-memory server: + self.client._treq._agent.flush() + + # After 59 (second write) + 59 (third write) + 60 seconds (quiescent + # timeout) the limited_content() response times out. 
+ self.assertTrue(error) + with self.assertRaises(CancelledError): + error[0].raiseException() + + def test_limited_content_cancels_timeout_on_failed_response(self): + """ + If the response fails somehow, the timeout is still cancelled. + """ + response = result_of( + self.client.request( + "GET", + "http://127.0.0.1/die", + ) + ) + + d = limited_content(response, self._http_server.clock, 4) + with self.assertRaises(ValueError): + result_of(d) + self.assertEqual(len(self._http_server.clock.getDelayedCalls()), 0) + + +class HttpTestFixture(Fixture): + """ + Setup HTTP tests' infrastructure, the storage server and corresponding + client. + """ + + def _setUp(self): + StorageClient.start_test_mode( + lambda pool: self.addCleanup(pool.closeCachedConnections) + ) + self.addCleanup(StorageClient.stop_test_mode) + self.clock = Clock() + self.tempdir = self.useFixture(TempDir()) + # The global Cooperator used by Twisted (a) used by pull producers in + # twisted.web, (b) is driven by a real reactor. We want to push time + # forward ourselves since we rely on pull producers in the HTTP storage + # server. + self.mock = self.useFixture( + MonkeyPatch( + "twisted.internet.task._theCooperator", + Cooperator(scheduler=lambda c: self.clock.callLater(0.000001, c)), + ) + ) + self.storage_server = StorageServer( + self.tempdir.path, b"\x00" * 20, clock=self.clock + ) + self.http_server = HTTPServer(self.storage_server, SWISSNUM_FOR_TEST) + self.treq = StubTreq(self.http_server.get_resource()) + self.client = StorageClient( + DecodedURL.from_text("http://127.0.0.1"), + SWISSNUM_FOR_TEST, + treq=self.treq, + clock=self.clock, + ) + + def result_of_with_flush(self, d): + """ + Like ``result_of``, but supports fake reactor and ``treq`` testing + infrastructure necessary to support asynchronous HTTP server endpoints. + """ + result = [] + error = [] + d.addCallbacks(result.append, error.append) + + # Check for synchronous HTTP endpoint handler: + if result: + return result[0] + if error: + error[0].raiseException() + + # OK, no result yet, probably async HTTP endpoint handler, so advance + # time, flush treq, and try again: + for i in range(100): + self.clock.advance(0.001) + self.treq.flush() + if result: + return result[0] + if error: + error[0].raiseException() + raise RuntimeError( + "We expected given Deferred to have result already, but it wasn't. " + + "This is probably a test design issue." + ) + + +class StorageClientWithHeadersOverride(object): + """Wrap ``StorageClient`` and override sent headers.""" + + def __init__(self, storage_client, add_headers): + self.storage_client = storage_client + self.add_headers = add_headers + + def __getattr__(self, attr): + return getattr(self.storage_client, attr) + + def request(self, *args, headers=None, **kwargs): + if headers is None: + headers = Headers() + for key, value in self.add_headers.items(): + headers.setRawHeaders(key, [value]) + return self.storage_client.request(*args, headers=headers, **kwargs) + + +@contextmanager +def assert_fails_with_http_code(test_case: SyncTestCase, code: int): + """ + Context manager that asserts the code fails with the given HTTP response + code. + """ + with test_case.assertRaises(ClientException) as e: + try: + yield + finally: + pass + test_case.assertEqual(e.exception.code, code) + + +class GenericHTTPAPITests(SyncTestCase): + """ + Tests of HTTP client talking to the HTTP server, for generic HTTP API + endpoints and concerns. 
+ """ + + def setUp(self): + super(GenericHTTPAPITests, self).setUp() + self.http = self.useFixture(HttpTestFixture()) + + def test_bad_authentication(self): + """ + If the wrong swissnum is used, an ``Unauthorized`` response code is + returned. + """ + client = StorageClientGeneral( + StorageClient( + DecodedURL.from_text("http://127.0.0.1"), + b"something wrong", + treq=StubTreq(self.http.http_server.get_resource()), + clock=self.http.clock, + ) + ) + with assert_fails_with_http_code(self, http.UNAUTHORIZED): + self.http.result_of_with_flush(client.get_version()) + + def test_unsupported_mime_type(self): + """ + The client can request mime types other than CBOR, and if they are + unsupported a NOT ACCEPTABLE (406) error will be returned. + """ + client = StorageClientGeneral( + StorageClientWithHeadersOverride(self.http.client, {"accept": "image/gif"}) + ) + with assert_fails_with_http_code(self, http.NOT_ACCEPTABLE): + self.http.result_of_with_flush(client.get_version()) + + def test_version(self): + """ + The client can return the version. + + We ignore available disk space and max immutable share size, since that + might change across calls. + """ + client = StorageClientGeneral(self.http.client) + version = self.http.result_of_with_flush(client.get_version()) + version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( + b"available-space" + ) + version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( + b"maximum-immutable-share-size" + ) + expected_version = self.http.storage_server.get_version() + expected_version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( + b"available-space" + ) + expected_version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( + b"maximum-immutable-share-size" + ) + self.assertEqual(version, expected_version) + + def test_server_side_schema_validation(self): + """ + Ensure that schema validation is happening: invalid CBOR should result + in bad request response code (error 400). + + We don't bother checking every single request, the API on the + server-side is designed to require a schema, so it validates + everywhere. But we check at least one to ensure we get correct + response code on bad input, so we know validation happened. + """ + upload_secret = urandom(32) + lease_secret = urandom(32) + storage_index = urandom(16) + url = self.http.client.relative_url( + "/storage/v1/immutable/" + _encode_si(storage_index) + ) + message = {"bad-message": "missing expected keys"} + + response = self.http.result_of_with_flush( + self.http.client.request( + "POST", + url, + lease_renew_secret=lease_secret, + lease_cancel_secret=lease_secret, + upload_secret=upload_secret, + message_to_serialize=message, + ) + ) + self.assertEqual(response.code, http.BAD_REQUEST) + + +class ImmutableHTTPAPITests(SyncTestCase): + """ + Tests for immutable upload/download APIs. 
+ """ + + def setUp(self): + super(ImmutableHTTPAPITests, self).setUp() + self.http = self.useFixture(HttpTestFixture()) + self.imm_client = StorageClientImmutables(self.http.client) + self.general_client = StorageClientGeneral(self.http.client) + + def create_upload(self, share_numbers, length): + """ + Create a write bucket on server, return: + + (upload_secret, lease_secret, storage_index, result) + """ + upload_secret = urandom(32) + lease_secret = urandom(32) + storage_index = urandom(16) + created = self.http.result_of_with_flush( + self.imm_client.create( + storage_index, + share_numbers, + length, + upload_secret, + lease_secret, + lease_secret, + ) + ) + return (upload_secret, lease_secret, storage_index, created) + + def test_upload_can_be_downloaded(self): + """ + A single share can be uploaded in (possibly overlapping) chunks, and + then a random chunk can be downloaded, and it will match the original + file. + + We don't exercise the full variation of overlapping chunks because + that's already done in test_storage.py. + """ + length = 100 + expected_data = bytes(range(100)) + + # Create a upload: + (upload_secret, _, storage_index, created) = self.create_upload({1}, 100) + self.assertEqual( + created, ImmutableCreateResult(already_have=set(), allocated={1}) + ) + + remaining = RangeMap() + remaining.set(True, 0, 100) + + # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. + def write(offset, length): + remaining.empty(offset, offset + length) + return self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + offset, + expected_data[offset : offset + length], + ) + + upload_progress = self.http.result_of_with_flush(write(10, 10)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + upload_progress = self.http.result_of_with_flush(write(30, 10)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + upload_progress = self.http.result_of_with_flush(write(50, 10)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + + # Then, an overlapping write with matching data (15-35): + upload_progress = self.http.result_of_with_flush(write(15, 20)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + + # Now fill in the holes: + upload_progress = self.http.result_of_with_flush(write(0, 10)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + upload_progress = self.http.result_of_with_flush(write(40, 10)) + self.assertEqual( + upload_progress, UploadProgress(finished=False, required=remaining) + ) + upload_progress = self.http.result_of_with_flush(write(60, 40)) + self.assertEqual( + upload_progress, UploadProgress(finished=True, required=RangeMap()) + ) + + # We can now read: + for offset, length in [(0, 100), (10, 19), (99, 1), (49, 200)]: + downloaded = self.http.result_of_with_flush( + self.imm_client.read_share_chunk(storage_index, 1, offset, length) + ) + self.assertEqual(downloaded, expected_data[offset : offset + length]) + + def test_write_with_wrong_upload_key(self): + """ + A write with an upload key that is different than the original upload + key will fail. 
+ """ + (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) + with assert_fails_with_http_code(self, http.UNAUTHORIZED): + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret + b"X", + 0, + b"123", + ) + ) + + def test_allocate_buckets_second_time_different_shares(self): + """ + If allocate buckets endpoint is called second time with different + upload key on potentially different shares, that creates the buckets on + those shares that are different. + """ + # Create a upload: + (upload_secret, lease_secret, storage_index, created) = self.create_upload( + {1, 2, 3}, 100 + ) + + # Write half of share 1 + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"a" * 50, + ) + ) + + # Add same shares with a different upload key share 1 overlaps with + # existing shares, this call shouldn't overwrite the existing + # work-in-progress. + upload_secret2 = b"x" * 2 + created2 = self.http.result_of_with_flush( + self.imm_client.create( + storage_index, + {1, 4, 6}, + 100, + upload_secret2, + lease_secret, + lease_secret, + ) + ) + self.assertEqual(created2.allocated, {4, 6}) + + # Write second half of share 1 + self.assertTrue( + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 50, + b"b" * 50, + ) + ).finished + ) + + # The upload of share 1 succeeded, demonstrating that second create() + # call didn't overwrite work-in-progress. + downloaded = self.http.result_of_with_flush( + self.imm_client.read_share_chunk(storage_index, 1, 0, 100) + ) + self.assertEqual(downloaded, b"a" * 50 + b"b" * 50) + + # We can successfully upload the shares created with the second upload secret. + self.assertTrue( + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 4, + upload_secret2, + 0, + b"x" * 100, + ) + ).finished + ) + + def test_list_shares(self): + """ + Once a share is finished uploading, it's possible to list it. + """ + (upload_secret, _, storage_index, created) = self.create_upload({1, 2, 3}, 10) + + # Initially there are no shares: + self.assertEqual( + self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), + set(), + ) + + # Upload shares 1 and 3: + for share_number in [1, 3]: + progress = self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + share_number, + upload_secret, + 0, + b"0123456789", + ) + ) + self.assertTrue(progress.finished) + + # Now shares 1 and 3 exist: + self.assertEqual( + self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), + {1, 3}, + ) + + def test_upload_bad_content_range(self): + """ + Malformed or invalid Content-Range headers to the immutable upload + endpoint result in a 416 error. 
+ """ + (upload_secret, _, storage_index, created) = self.create_upload({1}, 10) + + def check_invalid(bad_content_range_value): + client = StorageClientImmutables( + StorageClientWithHeadersOverride( + self.http.client, {"content-range": bad_content_range_value} + ) + ) + with assert_fails_with_http_code( + self, http.REQUESTED_RANGE_NOT_SATISFIABLE + ): + self.http.result_of_with_flush( + client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"0123456789", + ) + ) + + check_invalid("not a valid content-range header at all") + check_invalid("bytes -1-9/10") + check_invalid("bytes 0--9/10") + check_invalid("teapots 0-9/10") + + def test_list_shares_unknown_storage_index(self): + """ + Listing unknown storage index's shares results in empty list of shares. + """ + storage_index = bytes(range(16)) + self.assertEqual( + self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), + set(), + ) + + def test_upload_non_existent_storage_index(self): + """ + Uploading to a non-existent storage index or share number results in + 404. + """ + (upload_secret, _, storage_index, _) = self.create_upload({1}, 10) + + def unknown_check(storage_index, share_number): + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + share_number, + upload_secret, + 0, + b"0123456789", + ) + ) + + # Wrong share number: + unknown_check(storage_index, 7) + # Wrong storage index: + unknown_check(b"X" * 16, 7) + + def test_multiple_shares_uploaded_to_different_place(self): + """ + If a storage index has multiple shares, uploads to different shares are + stored separately and can be downloaded separately. + """ + (upload_secret, _, storage_index, _) = self.create_upload({1, 2}, 10) + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"1" * 10, + ) + ) + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 2, + upload_secret, + 0, + b"2" * 10, + ) + ) + self.assertEqual( + self.http.result_of_with_flush( + self.imm_client.read_share_chunk(storage_index, 1, 0, 10) + ), + b"1" * 10, + ) + self.assertEqual( + self.http.result_of_with_flush( + self.imm_client.read_share_chunk(storage_index, 2, 0, 10) + ), + b"2" * 10, + ) + + def test_mismatching_upload_fails(self): + """ + If an uploaded chunk conflicts with an already uploaded chunk, a + CONFLICT error is returned. + """ + (upload_secret, _, storage_index, created) = self.create_upload({1}, 100) + + # Write: + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"0" * 10, + ) + ) + + # Conflicting write: + with assert_fails_with_http_code(self, http.CONFLICT): + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"0123456789", + ) + ) + + def test_timed_out_upload_allows_reupload(self): + """ + If an in-progress upload times out, it is cancelled altogether, + allowing a new upload to occur. + """ + self._test_abort_or_timed_out_upload_to_existing_storage_index( + lambda **kwargs: self.http.clock.advance(30 * 60 + 1) + ) + + def test_abort_upload_allows_reupload(self): + """ + If an in-progress upload is aborted, it is cancelled altogether, + allowing a new upload to occur. 
+ """ + + def abort(storage_index, share_number, upload_secret): + return self.http.result_of_with_flush( + self.imm_client.abort_upload(storage_index, share_number, upload_secret) + ) + + self._test_abort_or_timed_out_upload_to_existing_storage_index(abort) + + def _test_abort_or_timed_out_upload_to_existing_storage_index(self, cancel_upload): + """Start uploading to an existing storage index that then times out or aborts. + + Re-uploading should work. + """ + # Start an upload: + (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"123", + ) + ) + + # Now, the upload is cancelled somehow: + cancel_upload( + storage_index=storage_index, upload_secret=upload_secret, share_number=1 + ) + + # Now we can create a new share with the same storage index without + # complaint: + upload_secret = urandom(32) + lease_secret = urandom(32) + created = self.http.result_of_with_flush( + self.imm_client.create( + storage_index, + {1}, + 100, + upload_secret, + lease_secret, + lease_secret, + ) + ) + self.assertEqual(created.allocated, {1}) + + # And write to it, too: + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"ABC", + ) + ) + + def test_unknown_aborts(self): + """ + Aborting uploads with an unknown storage index or share number will + result 404 HTTP response code. + """ + (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) + + for si, num in [(storage_index, 3), (b"x" * 16, 1)]: + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.imm_client.abort_upload(si, num, upload_secret) + ) + + def test_unauthorized_abort(self): + """ + An abort with the wrong key will return an unauthorized error, and will + not abort the upload. + """ + (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) + + # Failed to abort becaues wrong upload secret: + with assert_fails_with_http_code(self, http.UNAUTHORIZED): + self.http.result_of_with_flush( + self.imm_client.abort_upload(storage_index, 1, upload_secret + b"X") + ) + + # We can still write to it: + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 1, + upload_secret, + 0, + b"ABC", + ) + ) + + def test_too_late_abort(self): + """ + An abort of an already-fully-uploaded immutable will result in 405 + error and will not affect the immutable. + """ + uploaded_data = b"123" + (upload_secret, _, storage_index, _) = self.create_upload({0}, 3) + self.http.result_of_with_flush( + self.imm_client.write_share_chunk( + storage_index, + 0, + upload_secret, + 0, + uploaded_data, + ) + ) + + # Can't abort, we finished upload: + with assert_fails_with_http_code(self, http.NOT_ALLOWED): + self.http.result_of_with_flush( + self.imm_client.abort_upload(storage_index, 0, upload_secret) + ) + + # Abort didn't prevent reading: + self.assertEqual( + uploaded_data, + self.http.result_of_with_flush( + self.imm_client.read_share_chunk( + storage_index, + 0, + 0, + 3, + ) + ), + ) + + def test_lease_on_unknown_storage_index(self): + """ + An attempt to renew an unknown storage index will result in a HTTP 404. 
+ """ + storage_index = urandom(16) + secret = b"A" * 32 + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.general_client.add_or_renew_lease(storage_index, secret, secret) + ) + + +class MutableHTTPAPIsTests(SyncTestCase): + """Tests for mutable APIs.""" + + def setUp(self): + super(MutableHTTPAPIsTests, self).setUp() + self.http = self.useFixture(HttpTestFixture()) + self.mut_client = StorageClientMutables(self.http.client) + + def create_upload(self, data=b"abcdef"): + """ + Utility that creates shares 0 and 1 with bodies + ``{data}-{share_number}``. + """ + write_secret = urandom(32) + lease_secret = urandom(32) + storage_index = urandom(16) + self.http.result_of_with_flush( + self.mut_client.read_test_write_chunks( + storage_index, + write_secret, + lease_secret, + lease_secret, + { + 0: TestWriteVectors( + write_vectors=[WriteVector(offset=0, data=data + b"-0")] + ), + 1: TestWriteVectors( + write_vectors=[ + WriteVector(offset=0, data=data), + WriteVector(offset=len(data), data=b"-1"), + ] + ), + }, + [], + ) + ) + return storage_index, write_secret, lease_secret + + def test_write_can_be_read(self): + """ + Written data can be read using ``read_share_chunk``. + """ + storage_index, _, _ = self.create_upload() + data0 = self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 0, 1, 7) + ) + data1 = self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 1, 0, 8) + ) + self.assertEqual((data0, data1), (b"bcdef-0", b"abcdef-1")) + + def test_read_before_write(self): + """In combo read/test/write operation, reads happen before writes.""" + storage_index, write_secret, lease_secret = self.create_upload() + result = self.http.result_of_with_flush( + self.mut_client.read_test_write_chunks( + storage_index, + write_secret, + lease_secret, + lease_secret, + { + 0: TestWriteVectors( + write_vectors=[WriteVector(offset=1, data=b"XYZ")] + ), + }, + [ReadVector(0, 8)], + ) + ) + # Reads are from before the write: + self.assertEqual( + result, + ReadTestWriteResult( + success=True, reads={0: [b"abcdef-0"], 1: [b"abcdef-1"]} + ), + ) + # But the write did happen: + data0 = self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 0, 0, 8) + ) + data1 = self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 1, 0, 8) + ) + self.assertEqual((data0, data1), (b"aXYZef-0", b"abcdef-1")) + + def test_conditional_write(self): + """Uploads only happen if the test passes.""" + storage_index, write_secret, lease_secret = self.create_upload() + result_failed = self.http.result_of_with_flush( + self.mut_client.read_test_write_chunks( + storage_index, + write_secret, + lease_secret, + lease_secret, + { + 0: TestWriteVectors( + test_vectors=[TestVector(1, 4, b"FAIL")], + write_vectors=[WriteVector(offset=1, data=b"XYZ")], + ), + }, + [], + ) + ) + self.assertFalse(result_failed.success) + + # This time the test matches: + result = self.http.result_of_with_flush( + self.mut_client.read_test_write_chunks( + storage_index, + write_secret, + lease_secret, + lease_secret, + { + 0: TestWriteVectors( + test_vectors=[TestVector(1, 4, b"bcde")], + write_vectors=[WriteVector(offset=1, data=b"XYZ")], + ), + }, + [ReadVector(0, 8)], + ) + ) + self.assertTrue(result.success) + self.assertEqual( + self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 0, 0, 8) + ), + b"aXYZef-0", + ) + + def test_too_large_write(self): + """ + Writing too large of a 
chunk results in a REQUEST ENTITY TOO LARGE http + error. + """ + with self.assertRaises(ClientException) as e: + self.create_upload(b"0123456789" * 1024 * 1024) + self.assertEqual(e.exception.code, http.REQUEST_ENTITY_TOO_LARGE) + + def test_list_shares(self): + """``list_shares()`` returns the shares for a given storage index.""" + storage_index, _, _ = self.create_upload() + self.assertEqual( + self.http.result_of_with_flush(self.mut_client.list_shares(storage_index)), + {0, 1}, + ) + + def test_non_existent_list_shares(self): + """A non-existent storage index errors when shares are listed.""" + with self.assertRaises(ClientException) as exc: + self.http.result_of_with_flush(self.mut_client.list_shares(urandom(32))) + self.assertEqual(exc.exception.code, http.NOT_FOUND) + + def test_wrong_write_enabler(self): + """Writes with the wrong write enabler fail, and are not processed.""" + storage_index, write_secret, lease_secret = self.create_upload() + with self.assertRaises(ClientException) as exc: + self.http.result_of_with_flush( + self.mut_client.read_test_write_chunks( + storage_index, + urandom(32), + lease_secret, + lease_secret, + { + 0: TestWriteVectors( + write_vectors=[WriteVector(offset=1, data=b"XYZ")] + ), + }, + [ReadVector(0, 8)], + ) + ) + self.assertEqual(exc.exception.code, http.UNAUTHORIZED) + + # The write did not happen: + self.assertEqual( + self.http.result_of_with_flush( + self.mut_client.read_share_chunk(storage_index, 0, 0, 8) + ), + b"abcdef-0", + ) + + +class SharedImmutableMutableTestsMixin: + """ + Shared tests for mutables and immutables where the API is the same. + """ + + KIND: str # either "mutable" or "immutable" + general_client: StorageClientGeneral + client: Union[StorageClientImmutables, StorageClientMutables] + clientFactory: Callable[ + [StorageClient], Union[StorageClientImmutables, StorageClientMutables] + ] + + def upload(self, share_number: int, data_length=26) -> Tuple[bytes, bytes, bytes]: + """ + Create a share, return (storage_index, uploaded_data, lease secret). + """ + raise NotImplementedError + + def get_leases(self, storage_index: bytes) -> Iterable[LeaseInfo]: + """Get leases for the storage index.""" + raise NotImplementedError() + + def test_advise_corrupt_share(self): + """ + Advising share was corrupted succeeds from HTTP client's perspective, + and calls appropriate method on server. + """ + corrupted = [] + self.http.storage_server.advise_corrupt_share = lambda *args: corrupted.append( + args + ) + + storage_index, _, _ = self.upload(13) + reason = "OHNO \u1235" + self.http.result_of_with_flush( + self.client.advise_corrupt_share(storage_index, 13, reason) + ) + + self.assertEqual( + corrupted, + [(self.KIND.encode("ascii"), storage_index, 13, reason.encode("utf-8"))], + ) + + def test_advise_corrupt_share_unknown(self): + """ + Advising an unknown share was corrupted results in 404. + """ + storage_index, _, _ = self.upload(13) + reason = "OHNO \u1235" + self.http.result_of_with_flush( + self.client.advise_corrupt_share(storage_index, 13, reason) + ) + + for (si, share_number) in [(storage_index, 11), (urandom(16), 13)]: + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.client.advise_corrupt_share(si, share_number, reason) + ) + + def test_lease_renew_and_add(self): + """ + It's possible the renew the lease on an uploaded mutable/immutable, by + using the same renewal secret, or add a new lease by choosing a + different renewal secret. 
+ """ + # Create a storage index: + storage_index, _, lease_secret = self.upload(0) + + [lease] = self.get_leases(storage_index) + initial_expiration_time = lease.get_expiration_time() + + # Time passes: + self.http.clock.advance(167) + + # We renew the lease: + self.http.result_of_with_flush( + self.general_client.add_or_renew_lease( + storage_index, lease_secret, lease_secret + ) + ) + + # More time passes: + self.http.clock.advance(10) + + # We create a new lease: + lease_secret2 = urandom(32) + self.http.result_of_with_flush( + self.general_client.add_or_renew_lease( + storage_index, lease_secret2, lease_secret2 + ) + ) + + [lease1, lease2] = self.get_leases(storage_index) + self.assertEqual(lease1.get_expiration_time(), initial_expiration_time + 167) + self.assertEqual(lease2.get_expiration_time(), initial_expiration_time + 177) + + def test_read_of_wrong_storage_index_fails(self): + """ + Reading from unknown storage index results in 404. + """ + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.client.read_share_chunk( + b"1" * 16, + 1, + 0, + 10, + ) + ) + + def test_read_of_wrong_share_number_fails(self): + """ + Reading from unknown storage index results in 404. + """ + storage_index, _, _ = self.upload(1) + with assert_fails_with_http_code(self, http.NOT_FOUND): + self.http.result_of_with_flush( + self.client.read_share_chunk( + storage_index, + 7, # different share number + 0, + 10, + ) + ) + + def test_read_with_negative_offset_fails(self): + """ + Malformed or unsupported Range headers result in 416 (requested range + not satisfiable) error. + """ + storage_index, _, _ = self.upload(1) + + def check_bad_range(bad_range_value): + client = self.clientFactory( + StorageClientWithHeadersOverride( + self.http.client, {"range": bad_range_value} + ) + ) + + with assert_fails_with_http_code( + self, http.REQUESTED_RANGE_NOT_SATISFIABLE + ): + self.http.result_of_with_flush( + client.read_share_chunk( + storage_index, + 1, + 0, + 10, + ) + ) + + # Bad unit + check_bad_range("molluscs=0-9") + # Negative offsets + check_bad_range("bytes=-2-9") + check_bad_range("bytes=0--10") + # Negative offset no endpoint + check_bad_range("bytes=-300-") + check_bad_range("bytes=") + # Multiple ranges are currently unsupported, even if they're + # semantically valid under HTTP: + check_bad_range("bytes=0-5, 6-7") + # Ranges without an end are currently unsupported, even if they're + # semantically valid under HTTP. + check_bad_range("bytes=0-") + + @given(data_length=st.integers(min_value=1, max_value=300000)) + def test_read_with_no_range(self, data_length): + """ + A read with no range returns the whole mutable/immutable. + """ + storage_index, uploaded_data, _ = self.upload(1, data_length) + response = self.http.result_of_with_flush( + self.http.client.request( + "GET", + self.http.client.relative_url( + "/storage/v1/{}/{}/1".format(self.KIND, _encode_si(storage_index)) + ), + ) + ) + self.assertEqual(response.code, http.OK) + self.assertEqual( + self.http.result_of_with_flush(response.content()), uploaded_data + ) + + def test_validate_content_range_response_to_read(self): + """ + The server responds to ranged reads with an appropriate Content-Range + header. 
+ """ + storage_index, _, _ = self.upload(1, 26) + + def check_range(requested_range, expected_response): + headers = Headers() + headers.setRawHeaders("range", [requested_range]) + response = self.http.result_of_with_flush( + self.http.client.request( + "GET", + self.http.client.relative_url( + "/storage/v1/{}/{}/1".format( + self.KIND, _encode_si(storage_index) + ) + ), + headers=headers, + ) + ) + self.assertEqual( + response.headers.getRawHeaders("content-range"), [expected_response] + ) + + check_range("bytes=0-10", "bytes 0-10/*") + check_range("bytes=3-17", "bytes 3-17/*") + # TODO re-enable in https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3907 + # Can't go beyond the end of the mutable/immutable! + # check_range("bytes=10-100", "bytes 10-25/*") + + +class ImmutableSharedTests(SharedImmutableMutableTestsMixin, SyncTestCase): + """Shared tests, running on immutables.""" + + KIND = "immutable" + clientFactory = StorageClientImmutables + + def setUp(self): + super(ImmutableSharedTests, self).setUp() + self.http = self.useFixture(HttpTestFixture()) + self.client = self.clientFactory(self.http.client) + self.general_client = StorageClientGeneral(self.http.client) + + def upload(self, share_number, data_length=26): + """ + Create a share, return (storage_index, uploaded_data, lease_secret). + """ + uploaded_data = (b"abcdefghijklmnopqrstuvwxyz" * ((data_length // 26) + 1))[ + :data_length + ] + upload_secret = urandom(32) + lease_secret = urandom(32) + storage_index = urandom(16) + self.http.result_of_with_flush( + self.client.create( + storage_index, + {share_number}, + data_length, + upload_secret, + lease_secret, + lease_secret, + ) + ) + self.http.result_of_with_flush( + self.client.write_share_chunk( + storage_index, + share_number, + upload_secret, + 0, + uploaded_data, + ) + ) + return storage_index, uploaded_data, lease_secret + + def get_leases(self, storage_index): + return self.http.storage_server.get_leases(storage_index) + + +class MutableSharedTests(SharedImmutableMutableTestsMixin, SyncTestCase): + """Shared tests, running on mutables.""" + + KIND = "mutable" + clientFactory = StorageClientMutables + + def setUp(self): + super(MutableSharedTests, self).setUp() + self.http = self.useFixture(HttpTestFixture()) + self.client = self.clientFactory(self.http.client) + self.general_client = StorageClientGeneral(self.http.client) + + def upload(self, share_number, data_length=26): + """ + Create a share, return (storage_index, uploaded_data, lease_secret). + """ + data = (b"abcdefghijklmnopqrstuvwxyz" * ((data_length // 26) + 1))[:data_length] + write_secret = urandom(32) + lease_secret = urandom(32) + storage_index = urandom(16) + self.http.result_of_with_flush( + self.client.read_test_write_chunks( + storage_index, + write_secret, + lease_secret, + lease_secret, + { + share_number: TestWriteVectors( + write_vectors=[WriteVector(offset=0, data=data)] + ), + }, + [], + ) + ) + return storage_index, data, lease_secret + + def get_leases(self, storage_index): + return self.http.storage_server.get_slot_leases(storage_index) diff --git a/src/allmydata/test/test_storage_https.py b/src/allmydata/test/test_storage_https.py new file mode 100644 index 000000000..a11b0eed5 --- /dev/null +++ b/src/allmydata/test/test_storage_https.py @@ -0,0 +1,206 @@ +""" +Tests for the TLS part of the HTTP Storage Protocol. + +More broadly, these are tests for HTTPS usage as replacement for Foolscap's +server authentication logic, which may one day apply outside of HTTP Storage +Protocol. 
+""" + +from contextlib import asynccontextmanager + +from cryptography import x509 + +from twisted.internet.endpoints import serverFromString +from twisted.internet import reactor +from twisted.internet.defer import maybeDeferred +from twisted.web.server import Site +from twisted.web.static import Data +from twisted.web.client import Agent, HTTPConnectionPool, ResponseNeverReceived +from twisted.python.filepath import FilePath +from treq.client import HTTPClient + +from .common import SyncTestCase, AsyncTestCase, SameProcessStreamEndpointAssigner +from .certs import ( + generate_certificate, + generate_private_key, + private_key_to_file, + cert_to_file, +) +from ..storage.http_common import get_spki_hash +from ..storage.http_client import _StorageClientHTTPSPolicy +from ..storage.http_server import _TLSEndpointWrapper +from ..util.deferredutil import async_to_deferred +from .common_system import spin_until_cleanup_done + + +class HTTPSNurlTests(SyncTestCase): + """Tests for HTTPS NURLs.""" + + def test_spki_hash(self): + """The output of ``get_spki_hash()`` matches the semantics of RFC 7469. + + The expected hash was generated using Appendix A instructions in the + RFC:: + + openssl x509 -noout -in certificate.pem -pubkey | \ + openssl asn1parse -noout -inform pem -out public.key + openssl dgst -sha256 -binary public.key | openssl enc -base64 + """ + expected_hash = b"JIj6ezHkdSBlHhrnezAgIC_mrVQHy4KAFyL-8ZNPGPM" + certificate_text = b"""\ +-----BEGIN CERTIFICATE----- +MIIDWTCCAkECFCf+I+3oEhTfqt+6ruH4qQ4Wst1DMA0GCSqGSIb3DQEBCwUAMGkx +CzAJBgNVBAYTAlpaMRAwDgYDVQQIDAdOb3doZXJlMRQwEgYDVQQHDAtFeGFtcGxl +dG93bjEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEUMBIGA1UEAwwLZXhh +bXBsZS5jb20wHhcNMjIwMzAyMTUyNTQ3WhcNMjMwMzAyMTUyNTQ3WjBpMQswCQYD +VQQGEwJaWjEQMA4GA1UECAwHTm93aGVyZTEUMBIGA1UEBwwLRXhhbXBsZXRvd24x +HDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxFDASBgNVBAMMC2V4YW1wbGUu +Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv9vqtA8Toy9D6xLG +q41iUafSiAXnuirWxML2ct/LAcGJzATg6JctmJxxZQL7vkmaFFPBF6Y39bOGbbEC +M2iQYn2Qemj5fl3IzKTnYLqzryGM0ZwwnNbPyetSe/sksAIYRLzn49d6l+AHR+Dj +GyvoLzIyGUTn41MTDafMNtPgWx1i+65lFW3GHYpEmugu4bjeUPizNja2LrqwvwFu +YXwmKxbIMdioCoRvDGX9SI3/euFstuR4rbOEUDxniYRF5g6reP8UMF30zJzF5j0k +yDg8Z5b1XpKFNZAeyRYxcs9wJCqVlP6BLPDnvNVpMXodnWLeTK+r6YWvGadGVufk +YNC1PwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQByrhn78GSS3dJ0pJ6czmhMX5wH ++fauCtt1+Wbn+ctTodTycS+pfULO4gG7wRzhl8KNoOqLmWMjyA2A3mon8kdkD+0C +i8McpoPaGS2wQcqC28Ud6kP9YO81YFyTl4nHVKQ0nmplT+eoLDTCIWMVxHHzxIgs +2ybUluAc+THSjpGxB6kWSAJeg3N+f2OKr+07Yg9LiQ2b8y0eZarpiuuuXCzWeWrQ +PudP0aniyq/gbPhxq0tYF628IBvhDAnr/2kqEmVF2TDr2Sm/Y3PDBuPY6MeIxjnr +ox5zO3LrQmQw11OaIAs2/kviKAoKTFFxeyYcpS5RuKNDZfHQCXlLwt9bySxG +-----END CERTIFICATE----- +""" + certificate = x509.load_pem_x509_certificate(certificate_text) + self.assertEqual(get_spki_hash(certificate), expected_hash) + + +class PinningHTTPSValidation(AsyncTestCase): + """ + Test client-side validation logic of HTTPS certificates that uses + Tahoe-LAFS's pinning-based scheme instead of the traditional certificate + authority scheme. 
+ + https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate + """ + + def setUp(self): + self._port_assigner = SameProcessStreamEndpointAssigner() + self._port_assigner.setUp() + self.addCleanup(self._port_assigner.tearDown) + return AsyncTestCase.setUp(self) + + def tearDown(self): + d = maybeDeferred(AsyncTestCase.tearDown, self) + return d.addCallback(lambda _: spin_until_cleanup_done()) + + @asynccontextmanager + async def listen(self, private_key_path: FilePath, cert_path: FilePath): + """ + Context manager that runs a HTTPS server with the given private key + and certificate. + + Returns a URL that will connect to the server. + """ + location_hint, endpoint_string = self._port_assigner.assign(reactor) + underlying_endpoint = serverFromString(reactor, endpoint_string) + endpoint = _TLSEndpointWrapper.from_paths( + underlying_endpoint, private_key_path, cert_path + ) + root = Data(b"YOYODYNE", "text/plain") + root.isLeaf = True + listening_port = await endpoint.listen(Site(root)) + try: + yield f"https://127.0.0.1:{listening_port.getHost().port}/" + finally: + await listening_port.stopListening() + + def request(self, url: str, expected_certificate: x509.Certificate): + """ + Send a HTTPS request to the given URL, ensuring that the given + certificate is the one used via SPKI-hash-based pinning comparison. + """ + # No persistent connections, so we don't have dirty reactor at the end + # of the test. + treq_client = HTTPClient( + Agent( + reactor, + _StorageClientHTTPSPolicy( + expected_spki_hash=get_spki_hash(expected_certificate) + ), + pool=HTTPConnectionPool(reactor, persistent=False), + ) + ) + return treq_client.get(url) + + @async_to_deferred + async def test_success(self): + """ + If all conditions are met, a TLS client using the Tahoe-LAFS policy can + connect to the server. + """ + private_key = generate_private_key() + certificate = generate_certificate(private_key) + async with self.listen( + private_key_to_file(FilePath(self.mktemp()), private_key), + cert_to_file(FilePath(self.mktemp()), certificate), + ) as url: + response = await self.request(url, certificate) + self.assertEqual(await response.content(), b"YOYODYNE") + + @async_to_deferred + async def test_server_certificate_has_wrong_hash(self): + """ + If the server's certificate hash doesn't match the hash the client + expects, the request to the server fails. + """ + private_key1 = generate_private_key() + certificate1 = generate_certificate(private_key1) + private_key2 = generate_private_key() + certificate2 = generate_certificate(private_key2) + + async with self.listen( + private_key_to_file(FilePath(self.mktemp()), private_key1), + cert_to_file(FilePath(self.mktemp()), certificate1), + ) as url: + with self.assertRaises(ResponseNeverReceived): + await self.request(url, certificate2) + + @async_to_deferred + async def test_server_certificate_expired(self): + """ + If the server's certificate has expired, the request to the server + succeeds if the hash matches the one the client expects; expiration has + no effect. 
+ """ + private_key = generate_private_key() + certificate = generate_certificate(private_key, expires_days=-10) + + async with self.listen( + private_key_to_file(FilePath(self.mktemp()), private_key), + cert_to_file(FilePath(self.mktemp()), certificate), + ) as url: + response = await self.request(url, certificate) + self.assertEqual(await response.content(), b"YOYODYNE") + + @async_to_deferred + async def test_server_certificate_not_valid_yet(self): + """ + If the server's certificate is only valid starting in The Future, the + request to the server succeeds if the hash matches the one the client + expects; start time has no effect. + """ + private_key = generate_private_key() + certificate = generate_certificate( + private_key, expires_days=10, valid_in_days=5 + ) + + async with self.listen( + private_key_to_file(FilePath(self.mktemp()), private_key), + cert_to_file(FilePath(self.mktemp()), certificate), + ) as url: + response = await self.request(url, certificate) + self.assertEqual(await response.content(), b"YOYODYNE") + + # A potential attack to test is a private key that doesn't match the + # certificate... but OpenSSL (quite rightly) won't let you listen with that + # so I don't know how to test that! See + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3884 diff --git a/src/allmydata/test/test_storage_web.py b/src/allmydata/test/test_storage_web.py index b3f5fac98..b47c93849 100644 --- a/src/allmydata/test/test_storage_web.py +++ b/src/allmydata/test/test_storage_web.py @@ -19,26 +19,40 @@ import time import os.path import re import json +from unittest import skipIf +from six.moves import StringIO from twisted.trial import unittest - from twisted.internet import defer from twisted.application import service from twisted.web.template import flattenString +from twisted.python.filepath import FilePath +from twisted.python.runtime import platform from foolscap.api import fireEventually from allmydata.util import fileutil, hashutil, base32, pollmixin from allmydata.storage.common import storage_index_to_dir, \ UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError from allmydata.storage.server import StorageServer -from allmydata.storage.crawler import BucketCountingCrawler -from allmydata.storage.expirer import LeaseCheckingCrawler +from allmydata.storage.crawler import ( + BucketCountingCrawler, + _LeaseStateSerializer, +) +from allmydata.storage.expirer import ( + LeaseCheckingCrawler, + _HistorySerializer, +) from allmydata.web.storage import ( StorageStatus, StorageStatusElement, remove_prefix ) -from .common_util import FakeCanary +from allmydata.scripts.admin import ( + migrate_crawler, +) +from allmydata.scripts.runner import ( + Options, +) from .common_web import ( render, @@ -147,7 +161,7 @@ class BucketCounter(unittest.TestCase, pollmixin.PollMixin): html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b"Total buckets: 0 (the number of", s) - self.failUnless(b"Next crawl in 59 minutes" in s or "Next crawl in 60 minutes" in s, s) + self.failUnless(b"Next crawl in 59 minutes" in s or b"Next crawl in 60 minutes" in s, s) d.addCallback(_check2) return d @@ -289,28 +303,27 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): mutable_si_3, rs3, cs3, we3 = make_mutable(b"\x03" * 16) rs3a, cs3a = make_extra_lease(mutable_si_3, 1) sharenums = [0] - canary = FakeCanary() # note: 'tahoe debug dump-share' will not handle this file, since the # inner contents are not a valid CHK share data = b"\xff" * 1000 - a,w = 
ss.remote_allocate_buckets(immutable_si_0, rs0, cs0, sharenums, - 1000, canary) - w[0].remote_write(0, data) - w[0].remote_close() + a,w = ss.allocate_buckets(immutable_si_0, rs0, cs0, sharenums, + 1000) + w[0].write(0, data) + w[0].close() - a,w = ss.remote_allocate_buckets(immutable_si_1, rs1, cs1, sharenums, - 1000, canary) - w[0].remote_write(0, data) - w[0].remote_close() - ss.remote_add_lease(immutable_si_1, rs1a, cs1a) + a,w = ss.allocate_buckets(immutable_si_1, rs1, cs1, sharenums, + 1000) + w[0].write(0, data) + w[0].close() + ss.add_lease(immutable_si_1, rs1a, cs1a) - writev = ss.remote_slot_testv_and_readv_and_writev + writev = ss.slot_testv_and_readv_and_writev writev(mutable_si_2, (we2, rs2, cs2), {0: ([], [(0,data)], len(data))}, []) writev(mutable_si_3, (we3, rs3, cs3), {0: ([], [(0,data)], len(data))}, []) - ss.remote_add_lease(mutable_si_3, rs3a, cs3a) + ss.add_lease(mutable_si_3, rs3a, cs3a) self.sis = [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] self.renew_secrets = [rs0, rs1, rs1a, rs2, rs3, rs3a] @@ -376,7 +389,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(type(lah), list) self.failUnlessEqual(len(lah), 1) self.failUnlessEqual(lah, [ (0.0, DAY, 1) ] ) - self.failUnlessEqual(so_far["leases-per-share-histogram"], {1: 1}) + self.failUnlessEqual(so_far["leases-per-share-histogram"], {"1": 1}) self.failUnlessEqual(so_far["corrupt-shares"], []) sr1 = so_far["space-recovered"] self.failUnlessEqual(sr1["examined-buckets"], 1) @@ -427,9 +440,9 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failIf("cycle-to-date" in s) self.failIf("estimated-remaining-cycle" in s) self.failIf("estimated-current-cycle" in s) - last = s["history"][0] + last = s["history"]["0"] self.failUnlessIn("cycle-start-finish-times", last) - self.failUnlessEqual(type(last["cycle-start-finish-times"]), tuple) + self.failUnlessEqual(type(last["cycle-start-finish-times"]), list) self.failUnlessEqual(last["expiration-enabled"], False) self.failUnlessIn("configured-expiration-mode", last) @@ -437,9 +450,9 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): lah = last["lease-age-histogram"] self.failUnlessEqual(type(lah), list) self.failUnlessEqual(len(lah), 1) - self.failUnlessEqual(lah, [ (0.0, DAY, 6) ] ) + self.failUnlessEqual(lah, [ [0.0, DAY, 6] ] ) - self.failUnlessEqual(last["leases-per-share-histogram"], {1: 2, 2: 2}) + self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2}) self.failUnlessEqual(last["corrupt-shares"], []) rec = last["space-recovered"] @@ -485,17 +498,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): return d def backdate_lease(self, sf, renew_secret, new_expire_time): - # ShareFile.renew_lease ignores attempts to back-date a lease (i.e. - # "renew" a lease with a new_expire_time that is older than what the - # current lease has), so we have to reach inside it. 
- for i,lease in enumerate(sf.get_leases()): - if lease.renew_secret == renew_secret: - lease.expiration_time = new_expire_time - f = open(sf.home, 'rb+') - sf._write_lease_record(f, i, lease) - f.close() - return - raise IndexError("unable to renew non-existent lease") + sf.renew_lease(renew_secret, new_expire_time, allow_backdate=True) def test_expire_age(self): basedir = "storage/LeaseCrawler/expire_age" @@ -597,12 +600,12 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(count_leases(mutable_si_3), 1) s = lc.get_state() - last = s["history"][0] + last = s["history"]["0"] self.failUnlessEqual(last["expiration-enabled"], True) self.failUnlessEqual(last["configured-expiration-mode"], - ("age", 2000, None, ("mutable", "immutable"))) - self.failUnlessEqual(last["leases-per-share-histogram"], {1: 2, 2: 2}) + ["age", 2000, None, ["mutable", "immutable"]]) + self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2}) rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 4) @@ -741,14 +744,14 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): self.failUnlessEqual(count_leases(mutable_si_3), 1) s = lc.get_state() - last = s["history"][0] + last = s["history"]["0"] self.failUnlessEqual(last["expiration-enabled"], True) self.failUnlessEqual(last["configured-expiration-mode"], - ("cutoff-date", None, then, - ("mutable", "immutable"))) + ["cutoff-date", None, then, + ["mutable", "immutable"]]) self.failUnlessEqual(last["leases-per-share-histogram"], - {1: 2, 2: 2}) + {"1": 2, "2": 2}) rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 4) @@ -934,8 +937,8 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): s = lc.get_state() h = s["history"] self.failUnlessEqual(len(h), 10) - self.failUnlessEqual(max(h.keys()), 15) - self.failUnlessEqual(min(h.keys()), 6) + self.failUnlessEqual(max(int(k) for k in h.keys()), 15) + self.failUnlessEqual(min(int(k) for k in h.keys()), 6) d.addCallback(_check) return d @@ -1024,7 +1027,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): def _check(ignored): s = lc.get_state() - last = s["history"][0] + last = s["history"]["0"] rec = last["space-recovered"] self.failUnlessEqual(rec["configured-buckets"], 4) self.failUnlessEqual(rec["configured-shares"], 4) @@ -1120,7 +1123,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): def _after_first_cycle(ignored): s = lc.get_state() - last = s["history"][0] + last = s["history"]["0"] rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 5) self.failUnlessEqual(rec["examined-shares"], 3) @@ -1149,6 +1152,390 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): d.addBoth(_cleanup) return d + @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows") + def test_deserialize_pickle(self): + """ + The crawler can read existing state from the old pickle format + """ + # this file came from an "in the wild" tahoe version 1.16.0 + original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.state.txt") + root = FilePath(self.mktemp()) + storage = root.child("storage") + storage.makedirs() + test_pickle = storage.child("lease_checker.state") + with test_pickle.open("wb") as local, original_pickle.open("rb") as remote: + local.write(remote.read()) + + # convert from pickle format to JSON + top = Options() + top.parseOptions([ + "admin", "migrate-crawler", + "--basedir", storage.parent().path, + ]) + options = top.subOptions + 
while hasattr(options, "subOptions"): + options = options.subOptions + options.stdout = StringIO() + migrate_crawler(options) + + # the (existing) state file should have been upgraded to JSON + self.assertFalse(test_pickle.exists()) + self.assertTrue(test_pickle.siblingExtension(".json").exists()) + serial = _LeaseStateSerializer(test_pickle.path) + + self.assertEqual( + serial.load(), + { + u'last-complete-prefix': None, + u'version': 1, + u'current-cycle-start-time': 1635003106.611748, + u'last-cycle-finished': 312, + u'cycle-to-date': { + u'leases-per-share-histogram': { + u'1': 36793, + u'2': 1, + }, + u'space-recovered': { + u'examined-buckets-immutable': 17183, + u'configured-buckets-mutable': 0, + u'examined-shares-mutable': 1796, + u'original-shares-mutable': 1563, + u'configured-buckets-immutable': 0, + u'original-shares-immutable': 27926, + u'original-diskbytes-immutable': 431149056, + u'examined-shares-immutable': 34998, + u'original-buckets': 14661, + u'actual-shares-immutable': 0, + u'configured-shares': 0, + u'original-buckets-mutable': 899, + u'actual-diskbytes': 4096, + u'actual-shares-mutable': 0, + u'configured-buckets': 1, + u'examined-buckets-unknown': 14, + u'actual-sharebytes': 0, + u'original-shares': 29489, + u'actual-buckets-immutable': 0, + u'original-sharebytes': 312664812, + u'examined-sharebytes-immutable': 383801602, + u'actual-shares': 0, + u'actual-sharebytes-immutable': 0, + u'original-diskbytes': 441643008, + u'configured-diskbytes-mutable': 0, + u'configured-sharebytes-immutable': 0, + u'configured-shares-mutable': 0, + u'actual-diskbytes-immutable': 0, + u'configured-diskbytes-immutable': 0, + u'original-diskbytes-mutable': 10489856, + u'actual-sharebytes-mutable': 0, + u'configured-sharebytes': 0, + u'examined-shares': 36794, + u'actual-diskbytes-mutable': 0, + u'actual-buckets': 1, + u'original-buckets-immutable': 13761, + u'configured-sharebytes-mutable': 0, + u'examined-sharebytes': 390369660, + u'original-sharebytes-immutable': 308125753, + u'original-sharebytes-mutable': 4539059, + u'actual-buckets-mutable': 0, + u'examined-buckets-mutable': 1043, + u'configured-shares-immutable': 0, + u'examined-diskbytes': 476598272, + u'examined-diskbytes-mutable': 9154560, + u'examined-sharebytes-mutable': 6568058, + u'examined-buckets': 18241, + u'configured-diskbytes': 4096, + u'examined-diskbytes-immutable': 467443712}, + u'corrupt-shares': [ + [u'2dn6xnlnsqwtnapwxfdivpm3s4', 4], + [u'2dn6xnlnsqwtnapwxfdivpm3s4', 1], + [u'2rrzthwsrrxolevmwdvbdy3rqi', 4], + [u'2rrzthwsrrxolevmwdvbdy3rqi', 1], + [u'2skfngcto6h7eqmn4uo7ntk3ne', 4], + [u'2skfngcto6h7eqmn4uo7ntk3ne', 1], + [u'32d5swqpqx2mwix7xmqzvhdwje', 4], + [u'32d5swqpqx2mwix7xmqzvhdwje', 1], + [u'5mmayp66yflmpon3o6unsnbaca', 4], + [u'5mmayp66yflmpon3o6unsnbaca', 1], + [u'6ixhpvbtre7fnrl6pehlrlflc4', 4], + [u'6ixhpvbtre7fnrl6pehlrlflc4', 1], + [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 4], + [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 1], + [u'fu7pazf6ogavkqj6z4q5qqex3u', 4], + [u'fu7pazf6ogavkqj6z4q5qqex3u', 1], + [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 4], + [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 1], + [u'pmcjbdkbjdl26k3e6yja77femq', 4], + [u'pmcjbdkbjdl26k3e6yja77femq', 1], + [u'r6swof4v2uttbiiqwj5pi32cm4', 4], + [u'r6swof4v2uttbiiqwj5pi32cm4', 1], + [u't45v5akoktf53evc2fi6gwnv6y', 4], + [u't45v5akoktf53evc2fi6gwnv6y', 1], + [u'y6zb4faar3rdvn3e6pfg4wlotm', 4], + [u'y6zb4faar3rdvn3e6pfg4wlotm', 1], + [u'z3yghutvqoqbchjao4lndnrh3a', 4], + [u'z3yghutvqoqbchjao4lndnrh3a', 1], + ], + u'lease-age-histogram': { + "1641600,1728000": 78, + 
"12441600,12528000": 78, + "8640000,8726400": 32, + "1814400,1900800": 1860, + "2764800,2851200": 76, + "11491200,11577600": 20, + "10713600,10800000": 183, + "47865600,47952000": 7, + "3110400,3196800": 328, + "10627200,10713600": 43, + "45619200,45705600": 4, + "12873600,12960000": 5, + "7430400,7516800": 7228, + "1555200,1641600": 492, + "38880000,38966400": 3, + "12528000,12614400": 193, + "7344000,7430400": 12689, + "2678400,2764800": 278, + "2332800,2419200": 12, + "9244800,9331200": 73, + "12787200,12873600": 218, + "49075200,49161600": 19, + "10368000,10454400": 117, + "4665600,4752000": 256, + "7516800,7603200": 993, + "42336000,42422400": 33, + "10972800,11059200": 122, + "39052800,39139200": 51, + "12614400,12700800": 210, + "7603200,7689600": 2004, + "10540800,10627200": 16, + "950400,1036800": 4435, + "42076800,42163200": 4, + "8812800,8899200": 57, + "5788800,5875200": 954, + "36374400,36460800": 3, + "9331200,9417600": 12, + "30499200,30585600": 5, + "12700800,12787200": 25, + "2073600,2160000": 388, + "12960000,13046400": 8, + "11923200,12009600": 89, + "3369600,3456000": 79, + "3196800,3283200": 628, + "37497600,37584000": 11, + "33436800,33523200": 7, + "44928000,45014400": 2, + "37929600,38016000": 3, + "38966400,39052800": 61, + "3283200,3369600": 86, + "11750400,11836800": 7, + "3801600,3888000": 32, + "46310400,46396800": 1, + "4838400,4924800": 386, + "8208000,8294400": 38, + "37411200,37497600": 4, + "12009600,12096000": 329, + "10454400,10540800": 1239, + "40176000,40262400": 1, + "3715200,3801600": 104, + "44409600,44496000": 13, + "38361600,38448000": 5, + "12268800,12355200": 2, + "28771200,28857600": 6, + "41990400,42076800": 10, + "2592000,2678400": 40, + }, + }, + 'current-cycle': None, + 'last-complete-bucket': None, + } + ) + second_serial = _LeaseStateSerializer(serial._path.path) + self.assertEqual( + serial.load(), + second_serial.load(), + ) + + @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows") + def test_deserialize_history_pickle(self): + """ + The crawler can read existing history state from the old pickle + format + """ + # this file came from an "in the wild" tahoe version 1.16.0 + original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.history.txt") + root = FilePath(self.mktemp()) + storage = root.child("storage") + storage.makedirs() + test_pickle = storage.child("lease_checker.history") + with test_pickle.open("wb") as local, original_pickle.open("rb") as remote: + local.write(remote.read()) + + # convert from pickle format to JSON + top = Options() + top.parseOptions([ + "admin", "migrate-crawler", + "--basedir", storage.parent().path, + ]) + options = top.subOptions + while hasattr(options, "subOptions"): + options = options.subOptions + options.stdout = StringIO() + migrate_crawler(options) + + serial = _HistorySerializer(test_pickle.path) + + self.maxDiff = None + self.assertEqual( + serial.load(), + { + "363": { + 'configured-expiration-mode': ['age', None, None, ['immutable', 'mutable']], + 'expiration-enabled': False, + 'leases-per-share-histogram': { + '1': 39774, + }, + 'lease-age-histogram': [ + [0, 86400, 3125], + [345600, 432000, 4175], + [950400, 1036800, 141], + [1036800, 1123200, 345], + [1123200, 1209600, 81], + [1296000, 1382400, 1832], + [1555200, 1641600, 390], + [1728000, 1814400, 12], + [2073600, 2160000, 84], + [2160000, 2246400, 228], + [2246400, 2332800, 75], + [2592000, 2678400, 644], + [2678400, 2764800, 273], + [2764800, 2851200, 94], + [2851200, 2937600, 97], + 
[3196800, 3283200, 143], + [3283200, 3369600, 48], + [4147200, 4233600, 374], + [4320000, 4406400, 534], + [5270400, 5356800, 1005], + [6739200, 6825600, 8704], + [6825600, 6912000, 3986], + [6912000, 6998400, 7592], + [6998400, 7084800, 2607], + [7689600, 7776000, 35], + [8035200, 8121600, 33], + [8294400, 8380800, 54], + [8640000, 8726400, 45], + [8726400, 8812800, 27], + [8812800, 8899200, 12], + [9763200, 9849600, 77], + [9849600, 9936000, 91], + [9936000, 10022400, 1210], + [10022400, 10108800, 45], + [10108800, 10195200, 186], + [10368000, 10454400, 113], + [10972800, 11059200, 21], + [11232000, 11318400, 5], + [11318400, 11404800, 19], + [11404800, 11491200, 238], + [11491200, 11577600, 159], + [11750400, 11836800, 1], + [11836800, 11923200, 32], + [11923200, 12009600, 192], + [12009600, 12096000, 222], + [12096000, 12182400, 18], + [12182400, 12268800, 224], + [12268800, 12355200, 9], + [12355200, 12441600, 9], + [12441600, 12528000, 10], + [12528000, 12614400, 6], + [12614400, 12700800, 6], + [12700800, 12787200, 18], + [12787200, 12873600, 6], + [12873600, 12960000, 62], + ], + 'cycle-start-finish-times': [1634446505.241972, 1634446666.055401], + 'space-recovered': { + 'examined-buckets-immutable': 17896, + 'configured-buckets-mutable': 0, + 'examined-shares-mutable': 2473, + 'original-shares-mutable': 1185, + 'configured-buckets-immutable': 0, + 'original-shares-immutable': 27457, + 'original-diskbytes-immutable': 2810982400, + 'examined-shares-immutable': 37301, + 'original-buckets': 14047, + 'actual-shares-immutable': 0, + 'configured-shares': 0, + 'original-buckets-mutable': 691, + 'actual-diskbytes': 4096, + 'actual-shares-mutable': 0, + 'configured-buckets': 1, + 'examined-buckets-unknown': 14, + 'actual-sharebytes': 0, + 'original-shares': 28642, + 'actual-buckets-immutable': 0, + 'original-sharebytes': 2695552941, + 'examined-sharebytes-immutable': 2754798505, + 'actual-shares': 0, + 'actual-sharebytes-immutable': 0, + 'original-diskbytes': 2818981888, + 'configured-diskbytes-mutable': 0, + 'configured-sharebytes-immutable': 0, + 'configured-shares-mutable': 0, + 'actual-diskbytes-immutable': 0, + 'configured-diskbytes-immutable': 0, + 'original-diskbytes-mutable': 7995392, + 'actual-sharebytes-mutable': 0, + 'configured-sharebytes': 0, + 'examined-shares': 39774, + 'actual-diskbytes-mutable': 0, + 'actual-buckets': 1, + 'original-buckets-immutable': 13355, + 'configured-sharebytes-mutable': 0, + 'examined-sharebytes': 2763646972, + 'original-sharebytes-immutable': 2692076909, + 'original-sharebytes-mutable': 3476032, + 'actual-buckets-mutable': 0, + 'examined-buckets-mutable': 1286, + 'configured-shares-immutable': 0, + 'examined-diskbytes': 2854801408, + 'examined-diskbytes-mutable': 12161024, + 'examined-sharebytes-mutable': 8848467, + 'examined-buckets': 19197, + 'configured-diskbytes': 4096, + 'examined-diskbytes-immutable': 2842640384 + }, + 'corrupt-shares': [ + ['2dn6xnlnsqwtnapwxfdivpm3s4', 3], + ['2dn6xnlnsqwtnapwxfdivpm3s4', 0], + ['2rrzthwsrrxolevmwdvbdy3rqi', 3], + ['2rrzthwsrrxolevmwdvbdy3rqi', 0], + ['2skfngcto6h7eqmn4uo7ntk3ne', 3], + ['2skfngcto6h7eqmn4uo7ntk3ne', 0], + ['32d5swqpqx2mwix7xmqzvhdwje', 3], + ['32d5swqpqx2mwix7xmqzvhdwje', 0], + ['5mmayp66yflmpon3o6unsnbaca', 3], + ['5mmayp66yflmpon3o6unsnbaca', 0], + ['6ixhpvbtre7fnrl6pehlrlflc4', 3], + ['6ixhpvbtre7fnrl6pehlrlflc4', 0], + ['ewzhvswjsz4vp2bqkb6mi3bz2u', 3], + ['ewzhvswjsz4vp2bqkb6mi3bz2u', 0], + ['fu7pazf6ogavkqj6z4q5qqex3u', 3], + ['fu7pazf6ogavkqj6z4q5qqex3u', 0], + 
['hbyjtqvpcimwxiyqbcbbdn2i4a', 3], + ['hbyjtqvpcimwxiyqbcbbdn2i4a', 0], + ['pmcjbdkbjdl26k3e6yja77femq', 3], + ['pmcjbdkbjdl26k3e6yja77femq', 0], + ['r6swof4v2uttbiiqwj5pi32cm4', 3], + ['r6swof4v2uttbiiqwj5pi32cm4', 0], + ['t45v5akoktf53evc2fi6gwnv6y', 3], + ['t45v5akoktf53evc2fi6gwnv6y', 0], + ['y6zb4faar3rdvn3e6pfg4wlotm', 3], + ['y6zb4faar3rdvn3e6pfg4wlotm', 0], + ['z3yghutvqoqbchjao4lndnrh3a', 3], + ['z3yghutvqoqbchjao4lndnrh3a', 0], + ] + } + } + ) + class WebStatus(unittest.TestCase, pollmixin.PollMixin): diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py index 3e1bdcdd4..670ac5868 100644 --- a/src/allmydata/test/test_system.py +++ b/src/allmydata/test/test_system.py @@ -12,7 +12,7 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min, str # noqa: F401 from past.builtins import chr as byteschr, long -from six import ensure_text, ensure_str +from six import ensure_text import os, re, sys, time, json @@ -23,6 +23,7 @@ from twisted.internet import defer from allmydata import uri from allmydata.storage.mutable import MutableShareFile +from allmydata.storage.immutable import ShareFile from allmydata.storage.server import si_a2b from allmydata.immutable import offloaded, upload from allmydata.immutable.literal import LiteralFileNode @@ -116,11 +117,17 @@ class CountingDataUploadable(upload.Data): class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): - + """Foolscap integration-y tests.""" + FORCE_FOOLSCAP_FOR_STORAGE = True timeout = 180 + @property + def basedir(self): + return "system/SystemTest/{}-foolscap-{}".format( + self.id().split(".")[-1], self.FORCE_FOOLSCAP_FOR_STORAGE + ) + def test_connections(self): - self.basedir = "system/SystemTest/test_connections" d = self.set_up_nodes() self.extra_node = None d.addCallback(lambda res: self.add_extra_node(self.numclients)) @@ -148,11 +155,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): del test_connections def test_upload_and_download_random_key(self): - self.basedir = "system/SystemTest/test_upload_and_download_random_key" return self._test_upload_and_download(convergence=None) def test_upload_and_download_convergent(self): - self.basedir = "system/SystemTest/test_upload_and_download_convergent" return self._test_upload_and_download(convergence=b"some convergence string") def _test_upload_and_download(self, convergence): @@ -515,7 +520,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): def test_mutable(self): - self.basedir = "system/SystemTest/test_mutable" DATA = b"initial contents go here." # 25 bytes % 3 != 0 DATA_uploadable = MutableData(DATA) NEWDATA = b"new contents yay" @@ -745,7 +749,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # plaintext_hash check. 
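For reference, a minimal stand-alone sketch of how the basedir property added above behaves; Demo here is a hypothetical stand-in for SystemTest and the paths are only examples. It shows that each test method gets its own directory and that a subclass flipping FORCE_FOOLSCAP_FOR_STORAGE (as HTTPSystemTest does further down) lands in distinct paths:

import unittest

class Demo(unittest.TestCase):
    FORCE_FOOLSCAP_FOR_STORAGE = True

    @property
    def basedir(self):
        # same derivation as the property above: last component of the
        # test id plus the storage-protocol flag
        return "system/SystemTest/{}-foolscap-{}".format(
            self.id().split(".")[-1], self.FORCE_FOOLSCAP_FOR_STORAGE
        )

    def test_example(self):
        # e.g. "system/SystemTest/test_example-foolscap-True"
        self.assertIn("test_example-foolscap-True", self.basedir)

class HTTPDemo(Demo):
    # the inherited tests now use "...-foolscap-False" directories
    FORCE_FOOLSCAP_FOR_STORAGE = False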
def test_filesystem(self): - self.basedir = "system/SystemTest/test_filesystem" self.data = LARGE_DATA d = self.set_up_nodes() def _new_happy_semantics(ign): @@ -780,7 +783,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): d.addCallback(self._check_publish_private) d.addCallback(self.log, "did _check_publish_private") d.addCallback(self._test_web) - d.addCallback(self._test_control) d.addCallback(self._test_cli) # P now has four top-level children: # P/personal/sekrit data @@ -1291,9 +1293,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # are sharefiles here filename = os.path.join(dirpath, filenames[0]) # peek at the magic to see if it is a chk share - magic = open(filename, "rb").read(4) - if magic == b'\x00\x00\x00\x01': - break + with open(filename, "rb") as f: + if ShareFile.is_valid_header(f.read(32)): + break else: self.fail("unable to find any uri_extension files in %r" % self.basedir) @@ -1343,25 +1345,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): if line.startswith("CHK %s " % storage_index_s)] self.failUnlessEqual(len(matching), 10) - def _test_control(self, res): - # exercise the remote-control-the-client foolscap interfaces in - # allmydata.control (mostly used for performance tests) - c0 = self.clients[0] - control_furl_file = c0.config.get_private_path("control.furl") - control_furl = ensure_str(open(control_furl_file, "r").read().strip()) - # it doesn't really matter which Tub we use to connect to the client, - # so let's just use our IntroducerNode's - d = self.introducer.tub.getReference(control_furl) - d.addCallback(self._test_control2, control_furl_file) - return d - def _test_control2(self, rref, filename): - d = defer.succeed(None) - d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False)) - if sys.platform in ("linux2", "linux3"): - d.addCallback(lambda res: rref.callRemote("get_memory_usage")) - d.addCallback(lambda res: rref.callRemote("measure_peer_response_time")) - return d - def _test_cli(self, res): # run various CLI commands (in a thread, since they use blocking # network calls) @@ -1732,7 +1715,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): def test_filesystem_with_cli_in_subprocess(self): # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe. - self.basedir = "system/SystemTest/test_filesystem_with_cli_in_subprocess" d = self.set_up_nodes() def _new_happy_semantics(ign): for c in self.clients: @@ -1813,9 +1795,21 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): class Connections(SystemTestMixin, unittest.TestCase): + FORCE_FOOLSCAP_FOR_STORAGE = True def test_rref(self): - self.basedir = "system/Connections/rref" + # The way the listening port is created is via + # SameProcessStreamEndpointAssigner (allmydata.test.common), which then + # makes an endpoint string parsed by AdoptedServerPort. The latter does + # dup(fd), which results in the filedescriptor staying alive _until the + # test ends_. That means that when we disown the service, we still have + # the listening port there on the OS level! Just the resulting + # connections aren't handled. So this test relies on aggressive + # timeouts in the HTTP client and presumably some equivalent in + # Foolscap, since connection refused does _not_ happen. 
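The comment above leans on a dup(2) detail; here is a small, self-contained sketch (POSIX only, addresses made up) of why the duplicated descriptor keeps the OS-level listening port open even after the original socket object is closed:

import os
import socket

listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(("127.0.0.1", 0))
listener.listen(5)
port = listener.getsockname()[1]

dup_fd = os.dup(listener.fileno())   # roughly what AdoptedServerPort does
listener.close()                     # the original socket object goes away...

# ...but a new connection still succeeds, because dup_fd keeps the open
# file description (and therefore the listening port) alive.
probe = socket.create_connection(("127.0.0.1", port), timeout=2)
probe.close()
os.close(dup_fd)                     # only now is the port actually released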
+ self.basedir = "system/Connections/rref-foolscap-{}".format( + self.FORCE_FOOLSCAP_FOR_STORAGE + ) d = self.set_up_nodes(2) def _start(ign): self.c0 = self.clients[0] @@ -1831,9 +1825,13 @@ class Connections(SystemTestMixin, unittest.TestCase): # now shut down the server d.addCallback(lambda ign: self.clients[1].disownServiceParent()) + + # kill any persistent http connections that might continue to work + d.addCallback(lambda ign: self.close_idle_http_connections()) + # and wait for the client to notice def _poll(): - return len(self.c0.storage_broker.get_connected_servers()) < 2 + return len(self.c0.storage_broker.get_connected_servers()) == 1 d.addCallback(lambda ign: self.poll(_poll)) def _down(ign): @@ -1843,3 +1841,16 @@ class Connections(SystemTestMixin, unittest.TestCase): self.assertEqual(storage_server, self.s1_storage_server) d.addCallback(_down) return d + + +class HTTPSystemTest(SystemTest): + """HTTP storage protocol variant of the system tests.""" + + FORCE_FOOLSCAP_FOR_STORAGE = False + + + +class HTTPConnections(Connections): + """HTTP storage protocol variant of the connections tests.""" + FORCE_FOOLSCAP_FOR_STORAGE = False + diff --git a/src/allmydata/test/test_testing.py b/src/allmydata/test/test_testing.py index 527b235bd..3715d1aca 100644 --- a/src/allmydata/test/test_testing.py +++ b/src/allmydata/test/test_testing.py @@ -46,9 +46,10 @@ from hypothesis.strategies import ( binary, ) -from testtools import ( - TestCase, +from .common import ( + SyncTestCase, ) + from testtools.matchers import ( Always, Equals, @@ -61,7 +62,7 @@ from testtools.twistedsupport import ( ) -class FakeWebTest(TestCase): +class FakeWebTest(SyncTestCase): """ Test the WebUI verified-fakes infrastucture """ diff --git a/src/allmydata/test/test_upload.py b/src/allmydata/test/test_upload.py index 8d5435e88..18192de6c 100644 --- a/src/allmydata/test/test_upload.py +++ b/src/allmydata/test/test_upload.py @@ -983,7 +983,7 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin, num_segments = encoder.get_param("num_segments") d = selector.get_shareholders(broker, sh, storage_index, share_size, block_size, num_segments, - 10, 3, 4) + 10, 3, 4, encoder.get_uri_extension_size()) def _have_shareholders(upload_trackers_and_already_servers): (upload_trackers, already_servers) = upload_trackers_and_already_servers assert servers_to_break <= len(upload_trackers) diff --git a/src/allmydata/test/test_util.py b/src/allmydata/test/test_util.py index a03845ed6..9a0af1e06 100644 --- a/src/allmydata/test/test_util.py +++ b/src/allmydata/test/test_util.py @@ -553,11 +553,6 @@ class JSONBytes(unittest.TestCase): o, cls=jsonbytes.AnyBytesJSONEncoder)), expected, ) - self.assertEqual( - json.loads(jsonbytes.dumps(o, any_bytes=True)), - expected - ) - class FakeGetVersion(object): diff --git a/src/allmydata/test/web/test_grid.py b/src/allmydata/test/web/test_grid.py index edcf32268..1ebe3a90f 100644 --- a/src/allmydata/test/web/test_grid.py +++ b/src/allmydata/test/web/test_grid.py @@ -18,7 +18,6 @@ from six.moves import StringIO from bs4 import BeautifulSoup from twisted.web import resource -from twisted.trial import unittest from allmydata import uri, dirnode from allmydata.util import base32 from allmydata.util.encodingutil import to_bytes @@ -43,6 +42,21 @@ from .common import ( unknown_rwcap, ) +from ..common import ( + AsyncTestCase, +) + +from testtools.matchers import ( + Equals, + Contains, + Not, + HasLength, + EndsWith, +) + +from testtools.twistedsupport import flush_logged_errors + + 
DIR_HTML_TAG = '' class CompletelyUnhandledError(Exception): @@ -53,7 +67,7 @@ class ErrorBoom(resource.Resource, object): def render(self, req): raise CompletelyUnhandledError("whoops") -class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMixin, unittest.TestCase): +class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMixin, AsyncTestCase): def CHECK(self, ign, which, args, clientnum=0): fileurl = self.fileurls[which] @@ -117,37 +131,37 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "good", "t=check") def _got_html_good(res): - self.failUnlessIn("Healthy", res) - self.failIfIn("Not Healthy", res) + self.assertThat(res, Contains("Healthy")) + self.assertThat(res, Not(Contains("Not Healthy", ))) soup = BeautifulSoup(res, 'html5lib') assert_soup_has_favicon(self, soup) d.addCallback(_got_html_good) d.addCallback(self.CHECK, "good", "t=check&return_to=somewhere") def _got_html_good_return_to(res): - self.failUnlessIn("Healthy", res) - self.failIfIn("Not Healthy", res) - self.failUnlessIn('Return to file', res) + self.assertThat(res, Contains("Healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) + self.assertThat(res, Contains('Return to file')) d.addCallback(_got_html_good_return_to) d.addCallback(self.CHECK, "good", "t=check&output=json") def _got_json_good(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Healthy") self.failUnless(r["results"]["healthy"]) - self.failIfIn("needs-rebalancing", r["results"]) + self.assertThat(r["results"], Not(Contains("needs-rebalancing",))) self.failUnless(r["results"]["recoverable"]) d.addCallback(_got_json_good) d.addCallback(self.CHECK, "small", "t=check") def _got_html_small(res): - self.failUnlessIn("Literal files are always healthy", res) - self.failIfIn("Not Healthy", res) + self.assertThat(res, Contains("Literal files are always healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_small) d.addCallback(self.CHECK, "small", "t=check&return_to=somewhere") def _got_html_small_return_to(res): - self.failUnlessIn("Literal files are always healthy", res) - self.failIfIn("Not Healthy", res) - self.failUnlessIn('Return to file', res) + self.assertThat(res, Contains("Literal files are always healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) + self.assertThat(res, Contains('Return to file')) d.addCallback(_got_html_small_return_to) d.addCallback(self.CHECK, "small", "t=check&output=json") def _got_json_small(res): @@ -158,8 +172,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "smalldir", "t=check") def _got_html_smalldir(res): - self.failUnlessIn("Literal files are always healthy", res) - self.failIfIn("Not Healthy", res) + self.assertThat(res, Contains("Literal files are always healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_smalldir) d.addCallback(self.CHECK, "smalldir", "t=check&output=json") def _got_json_smalldir(res): @@ -170,43 +184,43 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "sick", "t=check") def _got_html_sick(res): - self.failUnlessIn("Not Healthy", res) + self.assertThat(res, Contains("Not Healthy")) d.addCallback(_got_html_sick) d.addCallback(self.CHECK, "sick", "t=check&output=json") def _got_json_sick(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Not Healthy: 9 shares (enc 
3-of-10)") - self.failIf(r["results"]["healthy"]) + self.assertThat(r["results"]["healthy"], Equals(False)) self.failUnless(r["results"]["recoverable"]) - self.failIfIn("needs-rebalancing", r["results"]) + self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) d.addCallback(_got_json_sick) d.addCallback(self.CHECK, "dead", "t=check") def _got_html_dead(res): - self.failUnlessIn("Not Healthy", res) + self.assertThat(res, Contains("Not Healthy")) d.addCallback(_got_html_dead) d.addCallback(self.CHECK, "dead", "t=check&output=json") def _got_json_dead(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Not Healthy: 1 shares (enc 3-of-10)") - self.failIf(r["results"]["healthy"]) - self.failIf(r["results"]["recoverable"]) - self.failIfIn("needs-rebalancing", r["results"]) + self.assertThat(r["results"]["healthy"], Equals(False)) + self.assertThat(r["results"]["recoverable"], Equals(False)) + self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) d.addCallback(_got_json_dead) d.addCallback(self.CHECK, "corrupt", "t=check&verify=true") def _got_html_corrupt(res): - self.failUnlessIn("Not Healthy! : Unhealthy", res) + self.assertThat(res, Contains("Not Healthy! : Unhealthy")) d.addCallback(_got_html_corrupt) d.addCallback(self.CHECK, "corrupt", "t=check&verify=true&output=json") def _got_json_corrupt(res): r = json.loads(res) - self.failUnlessIn("Unhealthy: 9 shares (enc 3-of-10)", r["summary"]) - self.failIf(r["results"]["healthy"]) + self.assertThat(r["summary"], Contains("Unhealthy: 9 shares (enc 3-of-10)")) + self.assertThat(r["results"]["healthy"], Equals(False)) self.failUnless(r["results"]["recoverable"]) - self.failIfIn("needs-rebalancing", r["results"]) + self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) self.failUnlessReallyEqual(r["results"]["count-happiness"], 9) self.failUnlessReallyEqual(r["results"]["count-shares-good"], 9) self.failUnlessReallyEqual(r["results"]["count-corrupt-shares"], 1) @@ -261,9 +275,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "good", "t=check&repair=true") def _got_html_good(res): - self.failUnlessIn("Healthy", res) - self.failIfIn("Not Healthy", res) - self.failUnlessIn("No repair necessary", res) + self.assertThat(res, Contains("Healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) + self.assertThat(res, Contains("No repair necessary", )) soup = BeautifulSoup(res, 'html5lib') assert_soup_has_favicon(self, soup) @@ -271,9 +285,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "sick", "t=check&repair=true") def _got_html_sick(res): - self.failUnlessIn("Healthy : healthy", res) - self.failIfIn("Not Healthy", res) - self.failUnlessIn("Repair successful", res) + self.assertThat(res, Contains("Healthy : healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) + self.assertThat(res, Contains("Repair successful")) d.addCallback(_got_html_sick) # repair of a dead file will fail, of course, but it isn't yet @@ -290,9 +304,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "corrupt", "t=check&verify=true&repair=true") def _got_html_corrupt(res): - self.failUnlessIn("Healthy : Healthy", res) - self.failIfIn("Not Healthy", res) - self.failUnlessIn("Repair successful", res) + self.assertThat(res, Contains("Healthy : Healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) + self.assertThat(res, Contains("Repair 
successful")) d.addCallback(_got_html_corrupt) d.addErrback(self.explain_web_error) @@ -392,31 +406,31 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi if expect_rw_uri: self.failUnlessReallyEqual(to_bytes(f[1]["rw_uri"]), unknown_rwcap, data) else: - self.failIfIn("rw_uri", f[1]) + self.assertThat(f[1], Not(Contains("rw_uri"))) if immutable: self.failUnlessReallyEqual(to_bytes(f[1]["ro_uri"]), unknown_immcap, data) else: self.failUnlessReallyEqual(to_bytes(f[1]["ro_uri"]), unknown_rocap, data) - self.failUnlessIn("metadata", f[1]) + self.assertThat(f[1], Contains("metadata")) d.addCallback(_check_directory_json, expect_rw_uri=not immutable) def _check_info(res, expect_rw_uri, expect_ro_uri): if expect_rw_uri: - self.failUnlessIn(unknown_rwcap, res) + self.assertThat(res, Contains(unknown_rwcap)) if expect_ro_uri: if immutable: - self.failUnlessIn(unknown_immcap, res) + self.assertThat(res, Contains(unknown_immcap)) else: - self.failUnlessIn(unknown_rocap, res) + self.assertThat(res, Contains(unknown_rocap)) else: - self.failIfIn(unknown_rocap, res) + self.assertThat(res, Not(Contains(unknown_rocap))) res = str(res, "utf-8") - self.failUnlessIn("Object Type: unknown", res) - self.failIfIn("Raw data as", res) - self.failIfIn("Directory writecap", res) - self.failIfIn("Checker Operations", res) - self.failIfIn("Mutable File Operations", res) - self.failIfIn("Directory Operations", res) + self.assertThat(res, Contains("Object Type: unknown")) + self.assertThat(res, Not(Contains("Raw data as"))) + self.assertThat(res, Not(Contains("Directory writecap"))) + self.assertThat(res, Not(Contains("Checker Operations"))) + self.assertThat(res, Not(Contains("Mutable File Operations"))) + self.assertThat(res, Not(Contains("Directory Operations"))) # FIXME: these should have expect_rw_uri=not immutable; I don't know # why they fail. Possibly related to ticket #922. @@ -432,7 +446,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi if expect_rw_uri: self.failUnlessReallyEqual(to_bytes(data[1]["rw_uri"]), unknown_rwcap, data) else: - self.failIfIn("rw_uri", data[1]) + self.assertThat(data[1], Not(Contains("rw_uri"))) if immutable: self.failUnlessReallyEqual(to_bytes(data[1]["ro_uri"]), unknown_immcap, data) @@ -442,10 +456,10 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.failUnlessReallyEqual(data[1]["mutable"], True) else: self.failUnlessReallyEqual(to_bytes(data[1]["ro_uri"]), unknown_rocap, data) - self.failIfIn("mutable", data[1]) + self.assertThat(data[1], Not(Contains("mutable"))) # TODO: check metadata contents - self.failUnlessIn("metadata", data[1]) + self.assertThat(data[1], Contains("metadata")) d.addCallback(lambda ign: self.GET("%s/%s?t=json" % (self.rooturl, str(name)))) d.addCallback(_check_json, expect_rw_uri=not immutable) @@ -519,14 +533,14 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi def _created(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) - self.failIf(dn.is_mutable()) + self.assertThat(dn.is_mutable(), Equals(False)) self.failUnless(dn.is_readonly()) # This checks that if we somehow ended up calling dn._decrypt_rwcapdata, it would fail. 
- self.failIf(hasattr(dn._node, 'get_writekey')) + self.assertThat(hasattr(dn._node, 'get_writekey'), Equals(False)) rep = str(dn) - self.failUnlessIn("RO-IMM", rep) + self.assertThat(rep, Contains("RO-IMM")) cap = dn.get_cap() - self.failUnlessIn(b"CHK", cap.to_string()) + self.assertThat(cap.to_string(), Contains(b"CHK")) self.cap = cap self.rootnode = dn self.rooturl = "uri/" + url_quote(dn.get_uri()) @@ -546,7 +560,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi (name_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4) name = name_utf8.decode("utf-8") self.failUnlessEqual(rwcapdata, b"") - self.failUnlessIn(name, kids) + self.assertThat(kids, Contains(name)) (expected_child, ign) = kids[name] self.failUnlessReallyEqual(ro_uri, expected_child.get_readonly_uri()) numkids += 1 @@ -572,27 +586,27 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(lambda ign: self.GET(self.rooturl)) def _check_html(res): soup = BeautifulSoup(res, 'html5lib') - self.failIfIn(b"URI:SSK", res) + self.assertThat(res, Not(Contains(b"URI:SSK"))) found = False for td in soup.find_all(u"td"): if td.text != u"FILE": continue a = td.findNextSibling()(u"a")[0] - self.assertIn(url_quote(lonely_uri), a[u"href"]) - self.assertEqual(u"lonely", a.text) - self.assertEqual(a[u"rel"], [u"noreferrer"]) - self.assertEqual(u"{}".format(len("one")), td.findNextSibling().findNextSibling().text) + self.assertThat(a[u"href"], Contains(url_quote(lonely_uri))) + self.assertThat(a.text, Equals(u"lonely")) + self.assertThat(a[u"rel"], Equals([u"noreferrer"])) + self.assertThat(td.findNextSibling().findNextSibling().text, Equals(u"{}".format(len("one")))) found = True break - self.assertTrue(found) + self.assertThat(found, Equals(True)) infos = list( a[u"href"] for a in soup.find_all(u"a") if a.text == u"More Info" ) - self.assertEqual(1, len(infos)) - self.assertTrue(infos[0].endswith(url_quote(lonely_uri) + "?t=info")) + self.assertThat(infos, HasLength(1)) + self.assertThat(infos[0], EndsWith(url_quote(lonely_uri) + "?t=info")) d.addCallback(_check_html) # ... and in JSON. 
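A tiny stand-alone illustration, with made-up HTML, of the BeautifulSoup inspection pattern used in _check_html above (collect the "More Info" anchors and check where they point):

from bs4 import BeautifulSoup

html = (
    '<table><tr><td>FILE</td>'
    '<td><a href="/uri/URI%3ACHK%3Aexample?t=info">More Info</a></td>'
    '</tr></table>'
)
soup = BeautifulSoup(html, "html5lib")
infos = [a["href"] for a in soup.find_all("a") if a.text == "More Info"]
assert len(infos) == 1
assert infos[0].endswith("?t=info")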
@@ -604,7 +618,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.failUnlessReallyEqual(sorted(listed_children.keys()), [u"lonely"]) ll_type, ll_data = listed_children[u"lonely"] self.failUnlessEqual(ll_type, "filenode") - self.failIfIn("rw_uri", ll_data) + self.assertThat(ll_data, Not(Contains("rw_uri"))) self.failUnlessReallyEqual(to_bytes(ll_data["ro_uri"]), lonely_uri) d.addCallback(_check_json) return d @@ -744,8 +758,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi error_line = lines[first_error] error_msg = lines[first_error+1:] error_msg_s = "\n".join(error_msg) + "\n" - self.failUnlessIn("ERROR: UnrecoverableFileError(no recoverable versions)", - error_line) + self.assertThat(error_line, Contains("ERROR: UnrecoverableFileError(no recoverable versions)")) self.failUnless(len(error_msg) > 2, error_msg_s) # some traceback units = [json.loads(line) for line in lines[:first_error]] self.failUnlessReallyEqual(len(units), 6) # includes subdir @@ -765,8 +778,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi error_line = lines[first_error] error_msg = lines[first_error+1:] error_msg_s = "\n".join(error_msg) + "\n" - self.failUnlessIn("ERROR: UnrecoverableFileError(no recoverable versions)", - error_line) + self.assertThat(error_line, Contains("ERROR: UnrecoverableFileError(no recoverable versions)")) self.failUnless(len(error_msg) > 2, error_msg_s) # some traceback units = [json.loads(line) for line in lines[:first_error]] self.failUnlessReallyEqual(len(units), 6) # includes subdir @@ -936,8 +948,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(self.CHECK, "one", "t=check") # no add-lease def _got_html_good(res): - self.failUnlessIn("Healthy", res) - self.failIfIn("Not Healthy", res) + self.assertThat(res, Contains("Healthy")) + self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") @@ -1111,7 +1123,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.GET, self.fileurls["0shares"])) def _check_zero_shares(body): body = str(body, "utf-8") - self.failIfIn("", body) + self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) exp = ("NoSharesError: no shares could be found. 
" "Zero shares usually indicates a corrupt URI, or that " @@ -1129,7 +1141,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.GET, self.fileurls["1share"])) def _check_one_share(body): body = str(body, "utf-8") - self.failIfIn("", body) + self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) msgbase = ("NotEnoughSharesError: This indicates that some " "servers were unavailable, or that shares have been " @@ -1154,17 +1166,16 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.GET, self.fileurls["imaginary"])) def _missing_child(body): body = str(body, "utf-8") - self.failUnlessIn("No such child: imaginary", body) + self.assertThat(body, Contains("No such child: imaginary")) d.addCallback(_missing_child) d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-0share"])) def _check_0shares_dir_html(body): - self.failUnlessIn(DIR_HTML_TAG, body) + self.assertThat(body, Contains(DIR_HTML_TAG)) # we should see the regular page, but without the child table or # the dirops forms body = " ".join(body.strip().split()) - self.failUnlessIn('href="?t=info">More info on this directory', - body) + self.assertThat(body, Contains('href="?t=info">More info on this directory')) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " "good shares. This might indicate that no servers were " @@ -1172,8 +1183,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. You " "should perform a filecheck on this object to learn more.") - self.failUnlessIn(exp, body) - self.failUnlessIn("No upload forms: directory is unreadable", body) + self.assertThat(body, Contains(exp)) + self.assertThat(body, Contains("No upload forms: directory is unreadable")) d.addCallback(_check_0shares_dir_html) d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-1share"])) @@ -1182,10 +1193,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi # and some-shares like we did for immutable files (since there # are different sorts of advice to offer in each case). For now, # they present the same way. - self.failUnlessIn(DIR_HTML_TAG, body) + self.assertThat(body, Contains(DIR_HTML_TAG)) body = " ".join(body.strip().split()) - self.failUnlessIn('href="?t=info">More info on this directory', - body) + self.assertThat(body, Contains('href="?t=info">More info on this directory')) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " "good shares. This might indicate that no servers were " @@ -1193,8 +1203,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. 
You " "should perform a filecheck on this object to learn more.") - self.failUnlessIn(exp, body) - self.failUnlessIn("No upload forms: directory is unreadable", body) + self.assertThat(body, Contains(exp)) + self.assertThat(body, Contains("No upload forms: directory is unreadable")) d.addCallback(_check_1shares_dir_html) d.addCallback(lambda ignored: @@ -1204,7 +1214,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.fileurls["dir-0share-json"])) def _check_unrecoverable_file(body): body = str(body, "utf-8") - self.failIfIn("", body) + self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " @@ -1213,7 +1223,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. You " "should perform a filecheck on this object to learn more.") - self.failUnlessIn(exp, body) + self.assertThat(body, Contains(exp)) d.addCallback(_check_unrecoverable_file) d.addCallback(lambda ignored: @@ -1245,7 +1255,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi headers={"accept": "*/*"})) def _internal_error_html1(body): body = str(body, "utf-8") - self.failUnlessIn("", "expected HTML, not '%s'" % body) + self.assertThat("expected HTML, not '%s'" % body, Contains("")) d.addCallback(_internal_error_html1) d.addCallback(lambda ignored: @@ -1255,8 +1265,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi headers={"accept": "text/plain"})) def _internal_error_text2(body): body = str(body, "utf-8") - self.failIfIn("", body) + self.assertThat(body, Not(Contains(""))) self.failUnless(body.startswith("Traceback "), body) + d.addCallback(_internal_error_text2) CLI_accepts = "text/plain, application/octet-stream" @@ -1267,7 +1278,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi headers={"accept": CLI_accepts})) def _internal_error_text3(body): body = str(body, "utf-8") - self.failIfIn("", body) + self.assertThat(body, Not(Contains(""))) self.failUnless(body.startswith("Traceback "), body) d.addCallback(_internal_error_text3) @@ -1276,12 +1287,12 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi 500, "Internal Server Error", None, self.GET, "ERRORBOOM")) def _internal_error_html4(body): - self.failUnlessIn(b"", body) + self.assertThat(body, Contains(b"")) d.addCallback(_internal_error_html4) def _flush_errors(res): # Trial: please ignore the CompletelyUnhandledError in the logs - self.flushLoggedErrors(CompletelyUnhandledError) + flush_logged_errors(CompletelyUnhandledError) return res d.addBoth(_flush_errors) @@ -1312,8 +1323,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi d.addCallback(_stash_dir) d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True)) def _check_dir_html(body): - self.failUnlessIn(DIR_HTML_TAG, body) - self.failUnlessIn("blacklisted.txt", body) + self.assertThat(body, Contains(DIR_HTML_TAG)) + self.assertThat(body, Contains("blacklisted.txt")) d.addCallback(_check_dir_html) d.addCallback(lambda ign: self.GET(self.url)) d.addCallback(lambda body: self.failUnlessEqual(DATA, body)) @@ -1336,8 +1347,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi # We 
should still be able to list the parent directory, in HTML... d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True)) def _check_dir_html2(body): - self.failUnlessIn(DIR_HTML_TAG, body) - self.failUnlessIn("blacklisted.txt", body) + self.assertThat(body, Contains(DIR_HTML_TAG)) + self.assertThat(body, Contains("blacklisted.txt")) d.addCallback(_check_dir_html2) # ... and in JSON (used by CLI). @@ -1347,8 +1358,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.failUnless(isinstance(data, list), data) self.failUnlessEqual(data[0], "dirnode") self.failUnless(isinstance(data[1], dict), data) - self.failUnlessIn("children", data[1]) - self.failUnlessIn("blacklisted.txt", data[1]["children"]) + self.assertThat(data[1], Contains("children")) + self.assertThat(data[1]["children"], Contains("blacklisted.txt")) childdata = data[1]["children"]["blacklisted.txt"] self.failUnless(isinstance(childdata, list), data) self.failUnlessEqual(childdata[0], "filenode") @@ -1387,7 +1398,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi self.child_url = b"uri/"+dn.get_readonly_uri()+b"/child" d.addCallback(_get_dircap) d.addCallback(lambda ign: self.GET(self.dir_url_base, followRedirect=True)) - d.addCallback(lambda body: self.failUnlessIn(DIR_HTML_TAG, str(body, "utf-8"))) + d.addCallback(lambda body: self.assertThat(str(body, "utf-8"), Contains(DIR_HTML_TAG))) d.addCallback(lambda ign: self.GET(self.dir_url_json1)) d.addCallback(lambda res: json.loads(res)) # just check it decodes d.addCallback(lambda ign: self.GET(self.dir_url_json2)) diff --git a/src/allmydata/test/web/test_introducer.py b/src/allmydata/test/web/test_introducer.py index ba0a5beb9..69309d35b 100644 --- a/src/allmydata/test/web/test_introducer.py +++ b/src/allmydata/test/web/test_introducer.py @@ -83,12 +83,18 @@ def create_introducer_webish(reactor, port_assigner, basedir): with the node and its webish service. 
""" node.create_node_dir(basedir, "testing") - _, port_endpoint = port_assigner.assign(reactor) + main_tub_location, main_tub_endpoint = port_assigner.assign(reactor) + _, web_port_endpoint = port_assigner.assign(reactor) with open(join(basedir, "tahoe.cfg"), "w") as f: f.write( "[node]\n" - "tub.location = 127.0.0.1:1\n" + - "web.port = {}\n".format(port_endpoint) + "tub.port = {main_tub_endpoint}\n" + "tub.location = {main_tub_location}\n" + "web.port = {web_port_endpoint}\n".format( + main_tub_endpoint=main_tub_endpoint, + main_tub_location=main_tub_location, + web_port_endpoint=web_port_endpoint, + ) ) intro_node = yield create_introducer(basedir) @@ -211,7 +217,7 @@ class IntroducerRootTests(SyncTestCase): main_tub = Tub() main_tub.listenOn(b"tcp:0") main_tub.setLocation(b"tcp:127.0.0.1:1") - introducer_node = _IntroducerNode(config, main_tub, None, None, None) + introducer_node = _IntroducerNode(config, main_tub, None, None) introducer_service = introducer_node.getServiceNamed("introducer") for n in range(2): diff --git a/src/allmydata/test/web/test_logs.py b/src/allmydata/test/web/test_logs.py index 89ec7ba42..81ec357c0 100644 --- a/src/allmydata/test/web/test_logs.py +++ b/src/allmydata/test/web/test_logs.py @@ -17,10 +17,8 @@ if PY2: import json -from twisted.trial import unittest from twisted.internet.defer import inlineCallbacks -from eliot import log_call from autobahn.twisted.testing import create_memory_agent, MemoryReactorClockResolver, create_pumper @@ -48,6 +46,7 @@ from .matchers import ( from ..common import ( SyncTestCase, + AsyncTestCase, ) from ...web.logs import ( @@ -55,6 +54,8 @@ from ...web.logs import ( TokenAuthenticatedWebSocketServerProtocol, ) +from eliot import log_call + class StreamingEliotLogsTests(SyncTestCase): """ Tests for the log streaming resources created by ``create_log_resources``. 
@@ -75,18 +76,20 @@ class StreamingEliotLogsTests(SyncTestCase): ) -class TestStreamingLogs(unittest.TestCase): +class TestStreamingLogs(AsyncTestCase): """ Test websocket streaming of logs """ def setUp(self): + super(TestStreamingLogs, self).setUp() self.reactor = MemoryReactorClockResolver() self.pumper = create_pumper() self.agent = create_memory_agent(self.reactor, self.pumper, TokenAuthenticatedWebSocketServerProtocol) return self.pumper.start() def tearDown(self): + super(TestStreamingLogs, self).tearDown() return self.pumper.stop() @inlineCallbacks @@ -114,10 +117,10 @@ class TestStreamingLogs(unittest.TestCase): proto.transport.loseConnection() yield proto.is_closed - self.assertEqual(len(messages), 2) - self.assertEqual(messages[0]["action_type"], "test:cli:some-exciting-action") - self.assertEqual(messages[0]["arguments"], - ["hello", "good-\\xff-day", 123, {"a": 35}, [None]]) - self.assertEqual(messages[1]["action_type"], "test:cli:some-exciting-action") - self.assertEqual("started", messages[0]["action_status"]) - self.assertEqual("succeeded", messages[1]["action_status"]) + self.assertThat(len(messages), Equals(3)) + self.assertThat(messages[0]["action_type"], Equals("test:cli:some-exciting-action")) + self.assertThat(messages[0]["arguments"], + Equals(["hello", "good-\\xff-day", 123, {"a": 35}, [None]])) + self.assertThat(messages[1]["action_type"], Equals("test:cli:some-exciting-action")) + self.assertThat("started", Equals(messages[0]["action_status"])) + self.assertThat("succeeded", Equals(messages[1]["action_status"])) diff --git a/src/allmydata/test/web/test_root.py b/src/allmydata/test/web/test_root.py index ca3cc695d..228b8e449 100644 --- a/src/allmydata/test/web/test_root.py +++ b/src/allmydata/test/web/test_root.py @@ -11,6 +11,7 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import time +import json from urllib.parse import ( quote, @@ -20,17 +21,27 @@ from bs4 import ( BeautifulSoup, ) -from twisted.trial import unittest from twisted.web.template import Tag from twisted.web.test.requesthelper import DummyRequest from twisted.application import service +from testtools.twistedsupport import succeeded +from twisted.internet.defer import ( + inlineCallbacks, + succeed, +) from ...storage_client import ( NativeStorageServer, StorageFarmBroker, ) -from ...web.root import RootElement +from ...web.root import ( + RootElement, + Root, +) from ...util.connection_status import ConnectionStatus +from ...crypto.ed25519 import ( + create_signing_keypair, +) from allmydata.web.root import URIHandler from allmydata.client import _Client @@ -44,7 +55,18 @@ from ..common import ( EMPTY_CLIENT_CONFIG, ) -class RenderSlashUri(unittest.TestCase): +from ..common import ( + SyncTestCase, + AsyncTestCase, +) + +from testtools.matchers import ( + Equals, + Contains, + AfterPreprocessing, +) + +class RenderSlashUri(SyncTestCase): """ Ensure that URIs starting with /uri?uri= only accept valid capabilities @@ -53,7 +75,9 @@ class RenderSlashUri(unittest.TestCase): def setUp(self): self.client = object() self.res = URIHandler(self.client) + super(RenderSlashUri, self).setUp() + @inlineCallbacks def test_valid_query_redirect(self): """ A syntactically valid capability given in the ``uri`` query argument @@ -64,9 +88,7 @@ class RenderSlashUri(unittest.TestCase): b"mukesarwdjxiyqsjinbfiiro6q7kgmmekocxfjcngh23oxwyxtzq:2:5:5874882" ) query_args = {b"uri": [cap]} - 
response_body = self.successResultOf( - render(self.res, query_args), - ) + response_body = yield render(self.res, query_args) soup = BeautifulSoup(response_body, 'html5lib') tag = assert_soup_has_tag_with_attributes( self, @@ -74,9 +96,9 @@ class RenderSlashUri(unittest.TestCase): u"meta", {u"http-equiv": "refresh"}, ) - self.assertIn( - quote(cap, safe=""), + self.assertThat( tag.attrs.get(u"content"), + Contains(quote(cap, safe="")), ) def test_invalid(self): @@ -84,16 +106,14 @@ class RenderSlashUri(unittest.TestCase): A syntactically invalid capbility results in an error. """ query_args = {b"uri": [b"not a capability"]} - response_body = self.successResultOf( - render(self.res, query_args), - ) - self.assertEqual( + response_body = render(self.res, query_args) + self.assertThat( response_body, - b"Invalid capability", + succeeded(AfterPreprocessing(bytes, Equals(b"Invalid capability"))), ) -class RenderServiceRow(unittest.TestCase): +class RenderServiceRow(SyncTestCase): def test_missing(self): """ minimally-defined static servers just need anonymous-storage-FURL @@ -127,5 +147,96 @@ class RenderServiceRow(unittest.TestCase): # Coerce `items` to list and pick the first item from it. item = list(items)[0] - self.assertEqual(item.slotData.get("version"), "") - self.assertEqual(item.slotData.get("nickname"), "") + self.assertThat(item.slotData.get("version"), Equals("")) + self.assertThat(item.slotData.get("nickname"), Equals("")) + + +class RenderRoot(AsyncTestCase): + + @inlineCallbacks + def test_root_json(self): + """ + The 'welcome' / root page renders properly with ?t=json when some + servers show None for available_space while others show a + valid int + + See also https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3852 + """ + ann = { + "anonymous-storage-FURL": "pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x", + "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3", + } + srv0 = NativeStorageServer(b"server_id0", ann, None, {}, EMPTY_CLIENT_CONFIG) + srv0.get_connection_status = lambda: ConnectionStatus(False, "summary0", {}, 0, 0) + + srv1 = NativeStorageServer(b"server_id1", ann, None, {}, EMPTY_CLIENT_CONFIG) + srv1.get_connection_status = lambda: ConnectionStatus(False, "summary1", {}, 0, 0) + # arrange for this server to have some valid available space + srv1.get_available_space = lambda: 12345 + + class FakeClient(_Client): + history = [] + stats_provider = object() + nickname = "" + nodeid = b"asdf" + _node_public_key = create_signing_keypair()[1] + introducer_clients = [] + helper = None + + def __init__(self): + service.MultiService.__init__(self) + self.storage_broker = StorageFarmBroker( + permute_peers=True, + tub_maker=None, + node_config=EMPTY_CLIENT_CONFIG, + ) + self.storage_broker.test_add_server(b"test-srv0", srv0) + self.storage_broker.test_add_server(b"test-srv1", srv1) + + root = Root(FakeClient(), now_fn=time.time) + + lines = [] + + req = DummyRequest(b"") + req.fields = {} + req.args = { + b"t": [b"json"], + } + + # for some reason, DummyRequest is already finished when we + # try to add a notifyFinish handler, so override that + # behavior. 
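The comment above works around DummyRequest being already finished by replacing notifyFinish outright; a small sketch of that monkey-patch pattern in isolation (FakeRequest is hypothetical), showing that callers can still chain callbacks on the already-fired Deferred:

from twisted.internet.defer import succeed

class FakeRequest(object):
    pass  # stand-in; only notifyFinish matters here

req = FakeRequest()
req.notifyFinish = lambda: succeed(None)   # always reports "already finished"

# downstream code that does request.notifyFinish().addCallback(...) still works:
d = req.notifyFinish()
d.addCallback(lambda ignored: None)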
+ + def nop(): + return succeed(None) + req.notifyFinish = nop + req.write = lines.append + + yield root.render(req) + + raw_js = b"".join(lines).decode("utf8") + js = json.loads(raw_js) + servers = js["servers"] + self.assertEquals(len(servers), 2) + self.assertIn( + { + "connection_status": "summary0", + "nodeid": "server_id0", + "last_received_data": 0, + "version": None, + "available_space": None, + "nickname": "" + }, + servers + ) + self.assertIn( + { + "connection_status": "summary1", + "nodeid": "server_id1", + "last_received_data": 0, + "version": None, + "available_space": 12345, + "nickname": "" + }, + servers + ) diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index 1c9d6b65c..03cd6e560 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -820,29 +820,37 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi """ d = self.GET("/?t=json") def _check(res): + """ + Check that the results are correct. + We can't depend on the order of servers in the output + """ decoded = json.loads(res) - expected = { - u'introducers': { - u'statuses': [], + self.assertEqual(decoded['introducers'], {u'statuses': []}) + actual_servers = decoded[u"servers"] + self.assertEquals(len(actual_servers), 2) + self.assertIn( + { + u"nodeid": u'other_nodeid', + u'available_space': 123456, + u'connection_status': u'summary', + u'last_received_data': 30, + u'nickname': u'other_nickname \u263b', + u'version': u'1.0', }, - u'servers': sorted([ - {u"nodeid": u'other_nodeid', - u'available_space': 123456, - u'connection_status': u'summary', - u'last_received_data': 30, - u'nickname': u'other_nickname \u263b', - u'version': u'1.0', - }, - {u"nodeid": u'disconnected_nodeid', - u'available_space': 123456, - u'connection_status': u'summary', - u'last_received_data': 35, - u'nickname': u'disconnected_nickname \u263b', - u'version': u'1.0', - }, - ], key=lambda o: sorted(o.items())), - } - self.assertEqual(expected, decoded) + actual_servers + ) + self.assertIn( + { + u"nodeid": u'disconnected_nodeid', + u'available_space': 123456, + u'connection_status': u'summary', + u'last_received_data': 35, + u'nickname': u'disconnected_nickname \u263b', + u'version': u'1.0', + }, + actual_servers + ) + d.addCallback(_check) return d diff --git a/src/allmydata/test/web/test_webish.py b/src/allmydata/test/web/test_webish.py index 12a04a6eb..4a77d21ae 100644 --- a/src/allmydata/test/web/test_webish.py +++ b/src/allmydata/test/web/test_webish.py @@ -90,10 +90,11 @@ class TahoeLAFSRequestTests(SyncTestCase): """ self._fields_test(b"GET", {}, b"", Equals(None)) - def test_form_fields(self): + def test_form_fields_if_filename_set(self): """ When a ``POST`` request is received, form fields are parsed into - ``TahoeLAFSRequest.fields``. + ``TahoeLAFSRequest.fields`` and the body is bytes (presuming ``filename`` + is set). """ form_data, boundary = multipart_formdata([ [param(u"name", u"foo"), @@ -121,6 +122,49 @@ class TahoeLAFSRequestTests(SyncTestCase): ), ) + def test_form_fields_if_name_is_file(self): + """ + When a ``POST`` request is received, form fields are parsed into + ``TahoeLAFSRequest.fields`` and the body is bytes when ``name`` + is set to ``"file"``. 
+ """ + form_data, boundary = multipart_formdata([ + [param(u"name", u"foo"), + body(u"bar"), + ], + [param(u"name", u"file"), + body(u"some file contents"), + ], + ]) + self._fields_test( + b"POST", + {b"content-type": b"multipart/form-data; boundary=" + bytes(boundary, 'ascii')}, + form_data.encode("ascii"), + AfterPreprocessing( + lambda fs: { + k: fs.getvalue(k) + for k + in fs.keys() + }, + Equals({ + "foo": "bar", + "file": b"some file contents", + }), + ), + ) + + def test_form_fields_require_correct_mime_type(self): + """ + The body of a ``POST`` is not parsed into fields if its mime type is + not ``multipart/form-data``. + + Reproducer for https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3854 + """ + data = u'{"lalala": "lolo"}' + data = data.encode("utf-8") + self._fields_test(b"POST", {"content-type": "application/json"}, + data, Equals(None)) + class TahoeLAFSSiteTests(SyncTestCase): """ diff --git a/src/allmydata/util/_eliot_updates.py b/src/allmydata/util/_eliot_updates.py new file mode 100644 index 000000000..81db566a4 --- /dev/null +++ b/src/allmydata/util/_eliot_updates.py @@ -0,0 +1,195 @@ +""" +Bring in some Eliot updates from newer versions of Eliot than we can +depend on in Python 2. The implementations are copied from Eliot 1.14 and +only changed enough to add Python 2 compatibility. + +Every API in this module (except ``eliot_json_encoder``) should be obsolete as +soon as we depend on Eliot 1.14 or newer. + +When that happens: + +* replace ``capture_logging`` + with ``partial(eliot.testing.capture_logging, encoder_=eliot_json_encoder)`` +* replace ``validateLogging`` + with ``partial(eliot.testing.validateLogging, encoder_=eliot_json_encoder)`` +* replace ``MemoryLogger`` + with ``partial(eliot.MemoryLogger, encoder=eliot_json_encoder)`` + +Ported to Python 3. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +import json as pyjson +from functools import wraps, partial + +from eliot import ( + MemoryLogger as _MemoryLogger, +) + +from eliot.testing import ( + check_for_errors, + swap_logger, +) + +from .jsonbytes import AnyBytesJSONEncoder + +# There are currently a number of log messages that include non-UTF-8 bytes. +# Allow these, at least for now. Later when the whole test suite has been +# converted to our SyncTestCase or AsyncTestCase it will be easier to turn +# this off and then attribute log failures to specific codepaths so they can +# be fixed (and then not regressed later) because those instances will result +# in test failures instead of only garbage being written to the eliot log. +eliot_json_encoder = AnyBytesJSONEncoder + +class _CustomEncoderMemoryLogger(_MemoryLogger): + """ + Override message validation from the Eliot-supplied ``MemoryLogger`` to + use our chosen JSON encoder. + + This is only necessary on Python 2 where we use an old version of Eliot + that does not parameterize the encoder. + """ + def __init__(self, encoder=eliot_json_encoder): + """ + @param encoder: A JSONEncoder subclass to use when encoding JSON. + """ + self._encoder = encoder + super(_CustomEncoderMemoryLogger, self).__init__() + + def _validate_message(self, dictionary, serializer): + """Validate an individual message. 
+ + As a side-effect, the message is replaced with its serialized contents. + + @param dictionary: A message C{dict} to be validated. Might be mutated + by the serializer! + + @param serializer: C{None} or a serializer. + + @raises TypeError: If a field name is not unicode, or the dictionary + fails to serialize to JSON. + + @raises eliot.ValidationError: If serializer was given and validation + failed. + """ + if serializer is not None: + serializer.validate(dictionary) + for key in dictionary: + if not isinstance(key, str): + if isinstance(key, bytes): + key.decode("utf-8") + else: + raise TypeError(dictionary, "%r is not unicode" % (key,)) + if serializer is not None: + serializer.serialize(dictionary) + + try: + pyjson.dumps(dictionary, cls=self._encoder) + except Exception as e: + raise TypeError("Message %s doesn't encode to JSON: %s" % (dictionary, e)) + +if PY2: + MemoryLogger = partial(_CustomEncoderMemoryLogger, encoder=eliot_json_encoder) +else: + MemoryLogger = partial(_MemoryLogger, encoder=eliot_json_encoder) + +def validateLogging( + assertion, *assertionArgs, **assertionKwargs +): + """ + Decorator factory for L{unittest.TestCase} methods to add logging + validation. + + 1. The decorated test method gets a C{logger} keyword argument, a + L{MemoryLogger}. + 2. All messages logged to this logger will be validated at the end of + the test. + 3. Any unflushed logged tracebacks will cause the test to fail. + + For example: + + from unittest import TestCase + from eliot.testing import assertContainsFields, validateLogging + + class MyTests(TestCase): + def assertFooLogging(self, logger): + assertContainsFields(self, logger.messages[0], {"key": 123}) + + + @param assertion: A callable that will be called with the + L{unittest.TestCase} instance, the logger and C{assertionArgs} and + C{assertionKwargs} once the actual test has run, allowing for extra + logging-related assertions on the effects of the test. Use L{None} if you + want the cleanup assertions registered but no custom assertions. + + @param assertionArgs: Additional positional arguments to pass to + C{assertion}. + + @param assertionKwargs: Additional keyword arguments to pass to + C{assertion}. + + @param encoder_: C{json.JSONEncoder} subclass to use when validating JSON. + """ + encoder_ = assertionKwargs.pop("encoder_", eliot_json_encoder) + def decorator(function): + @wraps(function) + def wrapper(self, *args, **kwargs): + skipped = False + + kwargs["logger"] = logger = MemoryLogger(encoder=encoder_) + self.addCleanup(check_for_errors, logger) + # TestCase runs cleanups in reverse order, and we want this to + # run *before* tracebacks are checked: + if assertion is not None: + self.addCleanup( + lambda: skipped + or assertion(self, logger, *assertionArgs, **assertionKwargs) + ) + try: + return function(self, *args, **kwargs) + except self.skipException: + skipped = True + raise + + return wrapper + + return decorator + +# PEP 8 variant: +validate_logging = validateLogging + +def capture_logging( + assertion, *assertionArgs, **assertionKwargs +): + """ + Capture and validate all logging that doesn't specify a L{Logger}. + + See L{validate_logging} for details on the rest of its behavior. 
+ """ + encoder_ = assertionKwargs.pop("encoder_", eliot_json_encoder) + def decorator(function): + @validate_logging( + assertion, *assertionArgs, encoder_=encoder_, **assertionKwargs + ) + @wraps(function) + def wrapper(self, *args, **kwargs): + logger = kwargs["logger"] + previous_logger = swap_logger(logger) + + def cleanup(): + swap_logger(previous_logger) + + self.addCleanup(cleanup) + return function(self, *args, **kwargs) + + return wrapper + + return decorator diff --git a/src/allmydata/util/deferredutil.py b/src/allmydata/util/deferredutil.py index ed2a11ee4..782663e8b 100644 --- a/src/allmydata/util/deferredutil.py +++ b/src/allmydata/util/deferredutil.py @@ -4,24 +4,13 @@ Utilities for working with Twisted Deferreds. Ported to Python 3. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from future.utils import PY2 -if PY2: - from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 - import time +from functools import wraps -try: - from typing import ( - Callable, - Any, - ) -except ImportError: - pass +from typing import ( + Callable, + Any, +) from foolscap.api import eventually from eliot.twisted import ( @@ -231,3 +220,17 @@ def until( yield action() if condition(): break + + +def async_to_deferred(f): + """ + Wrap an async function to return a Deferred instead. + + Maybe solution to https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3886 + """ + + @wraps(f) + def not_async(*args, **kwargs): + return defer.Deferred.fromCoroutine(f(*args, **kwargs)) + + return not_async diff --git a/src/allmydata/util/eliotutil.py b/src/allmydata/util/eliotutil.py index 4e48fbb9f..789ef38ff 100644 --- a/src/allmydata/util/eliotutil.py +++ b/src/allmydata/util/eliotutil.py @@ -16,12 +16,14 @@ from __future__ import ( ) __all__ = [ + "MemoryLogger", "inline_callbacks", "eliot_logging_service", "opt_eliot_destination", "opt_help_eliot_destinations", "validateInstanceOf", "validateSetMembership", + "capture_logging", ] from future.utils import PY2 @@ -32,7 +34,7 @@ from six import ensure_text from sys import ( stdout, ) -from functools import wraps, partial +from functools import wraps from logging import ( INFO, Handler, @@ -66,8 +68,6 @@ from eliot.twisted import ( DeferredContext, inline_callbacks, ) -from eliot.testing import capture_logging as eliot_capture_logging - from twisted.python.usage import ( UsageError, ) @@ -87,8 +87,11 @@ from twisted.internet.defer import ( ) from twisted.application.service import Service -from .jsonbytes import AnyBytesJSONEncoder - +from ._eliot_updates import ( + MemoryLogger, + eliot_json_encoder, + capture_logging, +) def validateInstanceOf(t): """ @@ -306,7 +309,7 @@ class _DestinationParser(object): rotateLength=rotate_length, maxRotatedFiles=max_rotated_files, ) - return lambda reactor: FileDestination(get_file(), AnyBytesJSONEncoder) + return lambda reactor: FileDestination(get_file(), eliot_json_encoder) _parse_destination_description = _DestinationParser().parse @@ -327,10 +330,3 @@ def log_call_deferred(action_type): return DeferredContext(d).addActionFinish() return logged_f return decorate_log_call_deferred - -# On Python 3, encoding bytes to JSON doesn't work, so we have a custom JSON -# encoder we want to use when validating messages. 
-if PY2: - capture_logging = eliot_capture_logging -else: - capture_logging = partial(eliot_capture_logging, encoder_=AnyBytesJSONEncoder) diff --git a/src/allmydata/util/encodingutil.py b/src/allmydata/util/encodingutil.py index f32710688..5e28f59fe 100644 --- a/src/allmydata/util/encodingutil.py +++ b/src/allmydata/util/encodingutil.py @@ -320,6 +320,9 @@ def quote_output(s, quotemarks=True, quote_newlines=None, encoding=None): # Although the problem is that doesn't work in Python 3.6, only 3.7 or # later... For now not thinking about it, just returning unicode since # that is the right thing to do on Python 3. + # + # Now that Python 3.7 is the minimum, this can in theory be done: + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3866 result = result.decode(encoding) return result diff --git a/src/allmydata/util/pid.py b/src/allmydata/util/pid.py new file mode 100644 index 000000000..f12c201d1 --- /dev/null +++ b/src/allmydata/util/pid.py @@ -0,0 +1,120 @@ +import psutil + +# the docs are a little misleading, but this is either WindowsFileLock +# or UnixFileLock depending upon the platform we're currently on +from filelock import FileLock, Timeout + + +class ProcessInTheWay(Exception): + """ + our pidfile points at a running process + """ + + +class InvalidPidFile(Exception): + """ + our pidfile isn't well-formed + """ + + +class CannotRemovePidFile(Exception): + """ + something went wrong removing the pidfile + """ + + +def _pidfile_to_lockpath(pidfile): + """ + internal helper. + :returns FilePath: a path to use for file-locking the given pidfile + """ + return pidfile.sibling("{}.lock".format(pidfile.basename())) + + +def parse_pidfile(pidfile): + """ + :param FilePath pidfile: + :returns tuple: 2-tuple of pid, creation-time as int, float + :raises InvalidPidFile: on error + """ + with pidfile.open("r") as f: + content = f.read().decode("utf8").strip() + try: + pid, starttime = content.split() + pid = int(pid) + starttime = float(starttime) + except ValueError: + raise InvalidPidFile( + "found invalid PID file in {}".format( + pidfile + ) + ) + return pid, starttime + + +def check_pid_process(pidfile): + """ + If another instance appears to be running already, raise an + exception. Otherwise, write our PID + start time to the pidfile + and arrange to delete it upon exit. + + :param FilePath pidfile: the file to read/write our PID from. + + :raises ProcessInTheWay: if a running process exists at our PID + """ + lock_path = _pidfile_to_lockpath(pidfile) + + try: + # a short timeout is fine, this lock should only be active + # while someone is reading or deleting the pidfile .. and + # facilitates testing the locking itself. + with FileLock(lock_path.path, timeout=2): + # check if we have another instance running already + if pidfile.exists(): + pid, starttime = parse_pidfile(pidfile) + try: + # if any other process is running at that PID, let the + # user decide if this is another legitimate + # instance. Automated programs may use the start-time to + # help decide this (if the PID is merely recycled, the + # start-time won't match). 
+ psutil.Process(pid) + raise ProcessInTheWay( + "A process is already running as PID {}".format(pid) + ) + except psutil.NoSuchProcess: + print( + "'{pidpath}' refers to {pid} that isn't running".format( + pidpath=pidfile.path, + pid=pid, + ) + ) + # nothing is running at that PID so it must be a stale file + pidfile.remove() + + # write our PID + start-time to the pid-file + proc = psutil.Process() + with pidfile.open("w") as f: + f.write("{} {}\n".format(proc.pid, proc.create_time()).encode("utf8")) + except Timeout: + raise ProcessInTheWay( + "Another process is still locking {}".format(pidfile.path) + ) + + +def cleanup_pidfile(pidfile): + """ + Remove the pidfile specified (respecting locks). If anything at + all goes wrong, `CannotRemovePidFile` is raised. + """ + lock_path = _pidfile_to_lockpath(pidfile) + with FileLock(lock_path.path): + try: + pidfile.remove() + except Exception as e: + raise CannotRemovePidFile( + "Couldn't remove '{pidfile}': {err}.".format( + pidfile=pidfile.path, + err=e, + ) + ) diff --git a/src/allmydata/web/root.py b/src/allmydata/web/root.py index 1debc1d10..f1a8569d6 100644 --- a/src/allmydata/web/root.py +++ b/src/allmydata/web/root.py @@ -297,14 +297,12 @@ class Root(MultiFormatResource): } return json.dumps(result, indent=1) + "\n" - def _describe_known_servers(self, broker): - return sorted(list( + return list( self._describe_server(server) for server in broker.get_known_servers() - ), key=lambda o: sorted(o.items())) - + ) def _describe_server(self, server): status = server.get_connection_status() diff --git a/src/allmydata/web/storage.py b/src/allmydata/web/storage.py index f2f021a15..e568d5ed5 100644 --- a/src/allmydata/web/storage.py +++ b/src/allmydata/web/storage.py @@ -256,8 +256,8 @@ class StorageStatusElement(Element): if so_far["corrupt-shares"]: add("Corrupt shares:", - T.ul( (T.li( ["SI %s shnum %d" % corrupt_share - for corrupt_share in so_far["corrupt-shares"] ] + T.ul( (T.li( ["SI %s shnum %d" % (si, shnum) + for si, shnum in so_far["corrupt-shares"] ] )))) return tag("Current cycle:", p) @@ -267,7 +267,8 @@ class StorageStatusElement(Element): h = lc.get_state()["history"] if not h: return "" - last = h[max(h.keys())] + biggest = str(max(int(k) for k in h.keys())) + last = h[biggest] start, end = last["cycle-start-finish-times"] tag("Last complete cycle (which took %s and finished %s ago)" @@ -290,8 +291,8 @@ class StorageStatusElement(Element): if last["corrupt-shares"]: add("Corrupt shares:", - T.ul( (T.li( ["SI %s shnum %d" % corrupt_share - for corrupt_share in last["corrupt-shares"] ] + T.ul( (T.li( ["SI %s shnum %d" % (si, shnum) + for si, shnum in last["corrupt-shares"] ] )))) return tag(p) diff --git a/src/allmydata/webish.py b/src/allmydata/webish.py index 9b63a220c..519b3e1f0 100644 --- a/src/allmydata/webish.py +++ b/src/allmydata/webish.py @@ -114,7 +114,8 @@ class TahoeLAFSRequest(Request, object): self.path, argstring = x self.args = parse_qs(argstring, 1) - if self.method == b'POST': + content_type = (self.requestHeaders.getRawHeaders("content-type") or [""])[0] + if self.method == b'POST' and content_type.split(";")[0] in ("multipart/form-data", "application/x-www-form-urlencoded"): # We use FieldStorage here because it performs better than # cgi.parse_multipart(self.content, pdict) which is what # twisted.web.http.Request uses. 
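The webish.py hunk above stops TahoeLAFSRequest from feeding non-form POST bodies to FieldStorage (the reproducer is test_form_fields_require_correct_mime_type, ticket 3854). A minimal standalone sketch of the same guard follows; the function name and the headers mapping are illustrative assumptions, not the real TahoeLAFSRequest API.

# Sketch only: `is_form_post` and the `headers` dict are hypothetical stand-ins.
FORM_CONTENT_TYPES = ("multipart/form-data", "application/x-www-form-urlencoded")

def is_form_post(method, headers):
    """Return True only for POST requests whose Content-Type is a form type."""
    if method != b"POST":
        return False
    # getRawHeaders-style lookup: a list of values, or None if the header is absent.
    content_type = (headers.get("content-type") or [""])[0]
    # Drop parameters such as "; boundary=..." before comparing.
    return content_type.split(";")[0] in FORM_CONTENT_TYPES

# A JSON POST must not be parsed into form fields, while a real form upload is.
assert not is_form_post(b"POST", {"content-type": ["application/json"]})
assert is_form_post(b"POST", {"content-type": ["multipart/form-data; boundary=xyz"]})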
diff --git a/tests.nix b/tests.nix new file mode 100644 index 000000000..dd477c273 --- /dev/null +++ b/tests.nix @@ -0,0 +1,88 @@ +let + sources = import nix/sources.nix; +in +# See default.nix for documentation about parameters. +{ pkgsVersion ? "nixpkgs-21.11" +, pkgs ? import sources.${pkgsVersion} { } +, pypiData ? sources.pypi-deps-db +, pythonVersion ? "python37" +, mach-nix ? import sources.mach-nix { + inherit pkgs pypiData; + python = pythonVersion; + } +}@args: +let + # We would like to know the test requirements but mach-nix does not directly + # expose this information to us. However, it is perfectly capable of + # determining it if we ask right... This is probably not meant to be a + # public mach-nix API but we pinned mach-nix so we can deal with mach-nix + # upgrade breakage in our own time. + mach-lib = import "${sources.mach-nix}/mach_nix/nix/lib.nix" { + inherit pkgs; + lib = pkgs.lib; + }; + tests_require = (mach-lib.extract "python37" ./. "extras_require" ).extras_require.test; + + # Get the Tahoe-LAFS package itself. This does not include test + # requirements and we don't ask for test requirements so that we can just + # re-use the normal package if it is already built. + tahoe-lafs = import ./. args; + + # If we want to get tahoe-lafs into a Python environment with a bunch of + # *other* Python modules and let them interact in the usual way then we have + # to ask mach-nix for tahoe-lafs and those other Python modules in the same + # way - i.e., using `requirements`. The other tempting mechanism, + # `packagesExtra`, inserts an extra layer of Python environment and prevents + # normal interaction between Python modules (as well as usually producing + # file collisions in the packages that are both runtime and test + # dependencies). To get the tahoe-lafs we just built into the environment, + # put it into nixpkgs using an overlay and tell mach-nix to get tahoe-lafs + # from nixpkgs. + overridesPre = [(self: super: { inherit tahoe-lafs; })]; + providers = tahoe-lafs.meta.mach-nix.providers // { tahoe-lafs = "nixpkgs"; }; + + # Make the Python environment in which we can run the tests. + python-env = mach-nix.mkPython { + # Get the packaging fixes we already know we need from putting together + # the runtime package. + inherit (tahoe-lafs.meta.mach-nix) _; + # Share the runtime package's provider configuration - combined with our + # own that causes the right tahoe-lafs to be picked up. + inherit providers overridesPre; + requirements = '' + # Here we pull in the Tahoe-LAFS package itself. + tahoe-lafs + + # Unfortunately mach-nix misses all of the Python dependencies of the + # tahoe-lafs satisfied from nixpkgs. Drag them in here. This gives a + # bit of a pyrrhic flavor to the whole endeavor but maybe mach-nix will + # fix this soon. + # + # https://github.com/DavHau/mach-nix/issues/123 + # https://github.com/DavHau/mach-nix/pull/386 + ${tahoe-lafs.requirements} + + # And then all of the test-only dependencies. + ${builtins.concatStringsSep "\n" tests_require} + + # txi2p-tahoe is another dependency with an environment marker that + # mach-nix doesn't automatically pick up. + txi2p-tahoe + ''; + }; +in +# Make a derivation that runs the unit test suite. +pkgs.runCommand "tahoe-lafs-tests" { } '' + export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci + ${python-env}/bin/python -m twisted.trial -j $NIX_BUILD_CORES allmydata + + # It's not cool to put the whole _trial_temp into $out because it has weird + # files in it we don't want in the store. 
Plus, even all of the less weird + # files are mostly just trash that's not meaningful if the test suite passes + # (which is the only way we get $out anyway). + # + # The build log itself is typically available from `nix-store --read-log` so + # we don't need to record that either. + echo "passed" >$out + +'' diff --git a/pyproject.toml b/towncrier.toml similarity index 80% rename from pyproject.toml rename to towncrier.toml index 7c97001aa..e093d0cc4 100644 --- a/pyproject.toml +++ b/towncrier.toml @@ -1,13 +1,3 @@ -# https://setuptools.pypa.io/en/latest/build_meta.html -# https://github.com/pypa/setuptools_scm -[build-system] -requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"] -build-backend = "setuptools.build_meta" - -[tool.setuptools_scm] -write_to = "src/allmydata/_version.py" -tag_regex = "^tahoe-lafs-(?P[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$" - [tool.towncrier] package_dir = "src" package = "allmydata" @@ -22,6 +12,11 @@ tag_regex = "^tahoe-lafs-(?P[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)? "~", ] + [[tool.towncrier.type]] + directory = "security" + name = "Security-related Changes" + showcontent = true + [[tool.towncrier.type]] directory = "incompat" name = "Backwards Incompatible Changes" diff --git a/tox.ini b/tox.ini index 4799b92f5..e4879914d 100644 --- a/tox.ini +++ b/tox.ini @@ -7,18 +7,19 @@ # the tox-gh-actions package. [gh-actions] python = - 2.7: py27-coverage,codechecks - 3.6: py36-coverage - 3.7: py37-coverage,typechecks,codechecks3 + 3.7: py37-coverage,typechecks,codechecks 3.8: py38-coverage 3.9: py39-coverage - pypy-3.7: pypy3 + 3.10: py310-coverage + pypy-3.7: pypy37 + pypy-3.8: pypy38 + pypy-3.9: pypy39 [pytest] twisted = 1 [tox] -envlist = typechecks,codechecks,codechecks3,py{27,36,37,38,39}-{coverage},pypy27,pypy3,integration,integration3 +envlist = typechecks,codechecks,py{37,38,39,310}-{coverage},pypy27,pypy37,pypy38,pypy39,integration minversion = 2.4 [testenv] @@ -36,12 +37,10 @@ deps = # happening at the time. The versions selected here are just the current # versions at the time. Bumping them to keep up with future releases is # fine as long as those releases are known to actually work. - # - # For now these are versions that support Python 2. - pip==20.3.4 - setuptools==44.1.1 - wheel==0.36.2 - subunitreporter==19.3.2 + pip==22.0.3 + setuptools==60.9.1 + wheel==0.37.1 + subunitreporter==22.2.0 # As an exception, we don't pin certifi because it contains CA # certificates which necessarily change over time. Pinning this is # guaranteed to cause things to break eventually as old certificates @@ -51,7 +50,7 @@ deps = # suffering we're trying to avoid with the above pins. certifi # VCS hooks support - py36,!coverage: pre-commit + py37,!coverage: pre-commit # We add usedevelop=False because testing against a true installation gives # more useful results. 
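As a usage note for the new Nix test runner above: assuming the niv-managed pins in nix/sources.nix are present, the tests.nix derivation can be built directly (for example with `nix-build tests.nix`), which runs the trial suite inside the Nix build sandbox and, on success, produces an output containing only the word "passed".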
@@ -90,40 +89,20 @@ commands = coverage: coverage report [testenv:integration] -setenv = - COVERAGE_PROCESS_START=.coveragerc -commands = - # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures' - py.test --timeout=1800 --coverage -v {posargs:integration} - coverage combine - coverage report - - -[testenv:integration3] basepython = python3 +platform = mylinux: linux + mymacos: darwin + mywindows: win32 setenv = COVERAGE_PROCESS_START=.coveragerc commands = - python --version # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures' - py.test --timeout=1800 --coverage -v {posargs:integration} + py.test --timeout=1800 --coverage -s -v {posargs:integration} coverage combine coverage report -# Once 2.7 is dropped, this can be removed. It just does flake8 with Python 2 -# since that can give different results than flake8 on Python 3. [testenv:codechecks] -basepython = python2.7 -setenv = - # If no positional arguments are given, try to run the checks on the - # entire codebase, including various pieces of supporting code. - DEFAULT_FILES=src integration static misc -commands = - flake8 {posargs:{env:DEFAULT_FILES}} - - -[testenv:codechecks3] basepython = python3 deps = # Newer versions of PyLint have buggy configuration @@ -148,8 +127,8 @@ commands = # If towncrier.check fails, you forgot to add a towncrier news # fragment explaining the change in this branch. Create one at # `newsfragments/.` with some text for the news - # file. See pyproject.toml for legal values. - python -m towncrier.check --config pyproject.toml + # file. See towncrier.toml for legal values. + python -m towncrier.check --config towncrier.toml [testenv:typechecks] @@ -162,6 +141,7 @@ deps = types-six types-PyYAML types-pkg_resources + types-pyOpenSSL git+https://github.com/warner/foolscap # Twisted 21.2.0 introduces some type hints which we are not yet # compatible with. @@ -177,7 +157,7 @@ deps = certifi towncrier==21.3.0 commands = - python -m towncrier --draft --config pyproject.toml + python -m towncrier --draft --config towncrier.toml [testenv:news] # On macOS, git invoked from Tox needs $HOME. @@ -189,7 +169,7 @@ deps = certifi towncrier==21.3.0 commands = - python -m towncrier --yes --config pyproject.toml + python -m towncrier --yes --config towncrier.toml # commit the changes git commit -m "update NEWS.txt for release" @@ -206,17 +186,6 @@ commands = flogtool --version python misc/build_helpers/run-deprecations.py --package allmydata --warnings={env:TAHOE_LAFS_WARNINGS_LOG:_trial_temp/deprecation-warnings.log} trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata} -[testenv:checkmemory] -commands = - rm -rf _test_memory - python src/allmydata/test/check_memory.py upload - python src/allmydata/test/check_memory.py upload-self - python src/allmydata/test/check_memory.py upload-POST - python src/allmydata/test/check_memory.py download - python src/allmydata/test/check_memory.py download-GET - python src/allmydata/test/check_memory.py download-GET-slow - python src/allmydata/test/check_memory.py receive - # Use 'tox -e docs' to check formatting and cross-references in docs .rst # files. The published docs are built by code run over at readthedocs.org, # which does not use this target (but does something similar). @@ -228,29 +197,19 @@ commands = # your web browser. [testenv:docs] -# we pin docutils because of https://sourceforge.net/p/docutils/bugs/301/ -# which asserts when it reads links to .svg files (e.g. 
about.rst) deps = - sphinx - docutils==0.12 - recommonmark - sphinx_rtd_theme + -r docs/requirements.txt # normal install is not needed for docs, and slows things down skip_install = True commands = sphinx-build -W -b html -d {toxinidir}/docs/_build/doctrees {toxinidir}/docs {toxinidir}/docs/_build/html [testenv:pyinstaller] -# We override this to pass --no-use-pep517 because pyinstaller (3.4, at least) -# is broken when this feature is enabled. -install_command = python -m pip install --no-use-pep517 {opts} {packages} extras = deps = {[testenv]deps} packaging - # PyInstaller 4.0 drops Python 2 support. When we finish porting to - # Python 3 we can reconsider this constraint. - pyinstaller < 4.0 + pyinstaller pefile ; platform_system == "Windows" # Setting PYTHONHASHSEED to a known value assists with reproducible builds. # See https://pyinstaller.readthedocs.io/en/stable/advanced-topics.html#creating-a-reproducible-build @@ -260,6 +219,7 @@ commands= pyinstaller -y --clean pyinstaller.spec [testenv:tarballs] +basepython = python3 deps = build commands = python -m build
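A hedged usage sketch for the async_to_deferred helper added to src/allmydata/util/deferredutil.py earlier in this diff; the `double` coroutine and the script scaffolding are illustrative only, not part of the patch.

# Illustrative only: shows how Deferred-based Twisted code can call a coroutine
# once it has been wrapped with async_to_deferred.
from twisted.internet import task
from allmydata.util.deferredutil import async_to_deferred

@async_to_deferred
async def double(x):
    # A real coroutine would typically await other Deferreds or coroutines here.
    return x * 2

def main(reactor):
    d = double(21)         # returns a Deferred, not a coroutine object
    d.addCallback(print)   # prints 42 when the Deferred fires
    return d

if __name__ == "__main__":
    task.react(main)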