Compare commits
246 Commits
master...2438.magic
Author | SHA1 | Date |
---|---|---|
Daira Hopwood | 1ea50421e2 | |
Daira Hopwood | 0417652d56 | |
Daira Hopwood | e150f0eb5c | |
Daira Hopwood | b06cf8d4c2 | |
Daira Hopwood | af0c367cbf | |
Daira Hopwood | 62034fd57d | |
Daira Hopwood | 5cd41563b4 | |
Daira Hopwood | f9d11ea147 | |
David Stainton | 1516c698e1 | |
David Stainton | 02b099c821 | |
David Stainton | b7cd4fd07d | |
David Stainton | 9a775c2d2d | |
David Stainton | 5eec09aeb7 | |
David Stainton | 20cdcb552d | |
David Stainton | 28bb534a1c | |
David Stainton | 8c488c5d37 | |
David Stainton | e1cddeeedf | |
David Stainton | ef2a8b1bfd | |
David Stainton | 0b91596abb | |
David Stainton | 39f0839645 | |
David Stainton | b11bebc129 | |
David Stainton | 871408f76a | |
David Stainton | 5bd4e833c8 | |
Daira Hopwood | d2da8d3328 | |
Daira Hopwood | 87941c0e2e | |
Daira Hopwood | 160762dea3 | |
Daira Hopwood | 0aa0a7452d | |
Daira Hopwood | 3adf4b96f3 | |
Daira Hopwood | c20e008d80 | |
Daira Hopwood | 4a51aa2567 | |
Daira Hopwood | 6e43d9ddf3 | |
Daira Hopwood | 4b056ea6d3 | |
Daira Hopwood | ff1ad20392 | |
Daira Hopwood | 8771a41000 | |
Daira Hopwood | ca99a9e21e | |
Daira Hopwood | c34f054c15 | |
Daira Hopwood | 285ecdc820 | |
Daira Hopwood | 314fd626e9 | |
Daira Hopwood | a3c4364d7b | |
Daira Hopwood | c8f5c690ba | |
David Stainton | e6b98b54ad | |
David Stainton | 48ae3e0be5 | |
David Stainton | 257d6665d2 | |
David Stainton | bbf47cb278 | |
Daira Hopwood | 5b3b8463ae | |
Daira Hopwood | 8462a03ed0 | |
Daira Hopwood | 754f99486b | |
Daira Hopwood | bfda47fef4 | |
Daira Hopwood | 472e267075 | |
Ramakrishnan Muthukrishnan | 982c89a835 | |
Ramakrishnan Muthukrishnan | 9bf5f5c312 | |
Ramakrishnan Muthukrishnan | eac7aefe26 | |
Ramakrishnan Muthukrishnan | ef3cdd7db1 | |
David Stainton | fbd400e6e5 | |
David Stainton | 09ee9d0e63 | |
David Stainton | dfafd0e29c | |
David Stainton | 24ec381dda | |
David Stainton | 47dc2c275c | |
David Stainton | 264138b44f | |
David Stainton | cdc4bc8e76 | |
David Stainton | 354280c26e | |
Daira Hopwood | f113db62ac | |
Daira Hopwood | 552342f8b6 | |
Daira Hopwood | 6b8b952211 | |
Daira Hopwood | 1ba8b96726 | |
Daira Hopwood | 0fde8b493a | |
David Stainton | d0cadd17fd | |
David Stainton | 0dc96dd951 | |
Daira Hopwood | 1957a30c52 | |
Daira Hopwood | 76b3e4038a | |
David Stainton | 7297bb5fc1 | |
David Stainton | 14ea4497ae | |
Daira Hopwood | decc5ff412 | |
Daira Hopwood | db242c7652 | |
Daira Hopwood | fc14b47175 | |
Daira Hopwood | 9eadfc5154 | |
Daira Hopwood | 5b23b354c6 | |
David Stainton | 8a98a99979 | |
Daira Hopwood | 36680042d3 | |
Ramakrishnan Muthukrishnan | 8adc28bb9d | |
Ramakrishnan Muthukrishnan | e49d76f68d | |
Ramakrishnan Muthukrishnan | d36521997e | |
Ramakrishnan Muthukrishnan | 4bab676316 | |
Daira Hopwood | 6eb272856e | |
Daira Hopwood | 4f7c02c651 | |
Daira Hopwood | f2d40cea9b | |
Daira Hopwood | 5377f3bbd5 | |
Daira Hopwood | 144f4ff5ff | |
Daira Hopwood | 8eb81b84f2 | |
Daira Hopwood | b4c0a140e3 | |
Daira Hopwood | 846d61b894 | |
Daira Hopwood | 4850c06c4a | |
Daira Hopwood | 0f33e04258 | |
David Stainton | b01bc80117 | |
David Stainton | 45b8e578eb | |
David Stainton | 084b92535c | |
David Stainton | ea39f11c57 | |
David Stainton | 12ab89c6b5 | |
David Stainton | 561ff74fa4 | |
David Stainton | 2b7e5c1481 | |
Daira Hopwood | 27fe50ff24 | |
Daira Hopwood | c7091ef6e6 | |
Daira Hopwood | 50c2cc575f | |
Daira Hopwood | b51563fe1e | |
Daira Hopwood | cde94e4eb6 | |
David Stainton | 067edc7793 | |
David Stainton | ecfc33bb05 | |
David Stainton | 825e8d9e9a | |
David Stainton | 6e42c00c9c | |
David Stainton | 3fe005def0 | |
Daira Hopwood | 69a5df6fea | |
Daira Hopwood | 8182752333 | |
David Stainton | bcfdcf2877 | |
Daira Hopwood | 3c77914519 | |
Daira Hopwood | 4045f5844f | |
Daira Hopwood | 5e8105fa91 | |
Daira Hopwood | ce2546c471 | |
Daira Hopwood | 8891b7d550 | |
Daira Hopwood | 6e6a771847 | |
Daira Hopwood | fc31a4f5ef | |
Daira Hopwood | d993e015aa | |
Daira Hopwood | afa9d9ae10 | |
Daira Hopwood | ad4b4843c2 | |
Daira Hopwood | 2bd93d2e02 | |
Daira Hopwood | f423e72062 | |
David Stainton | 57fc30be44 | |
David Stainton | a6b75aaa5d | |
David Stainton | 16c72decc0 | |
David Stainton | 42602ba290 | |
David Stainton | 598695010a | |
David Stainton | 444c732f81 | |
David Stainton | d57d30c60f | |
David Stainton | c7e0fc52e3 | |
David Stainton | 12fc5e6161 | |
David Stainton | 3b6f51abdc | |
David Stainton | 3612ca0504 | |
David Stainton | 847e8e99e9 | |
David Stainton | 48a8dfde4a | |
David Stainton | a7f22dff67 | |
David Stainton | 888089a689 | |
David Stainton | 4e8d8e076a | |
David Stainton | bec31e9810 | |
David Stainton | ccef5c4c44 | |
David Stainton | f6567d4833 | |
David Stainton | d7d429deb3 | |
David Stainton | 3ea5031261 | |
David Stainton | 0118c5105b | |
David Stainton | 88155f466c | |
David Stainton | 2d1e05ed4d | |
David Stainton | f0cddcb8da | |
David Stainton | 3e571fc4cf | |
David Stainton | 606b2aa029 | |
David Stainton | 11b218f8e5 | |
David Stainton | 3f80101f68 | |
David Stainton | b82f66de07 | |
David Stainton | bdffc3c484 | |
David Stainton | 21da540e5e | |
David Stainton | 3fb841adc4 | |
Daira Hopwood | 38a6d60f1a | |
Daira Hopwood | e61130395d | |
Daira Hopwood | 8f3fe4dffe | |
Daira Hopwood | 51f0c4de69 | |
David Stainton | 4b049fca56 | |
David Stainton | 4e714179f9 | |
Daira Hopwood | 7ee8ab4ac7 | |
Daira Hopwood | 32cad77b95 | |
David Stainton | 31d6430d10 | |
David Stainton | 8cefb7a680 | |
David Stainton | 4a798883be | |
David Stainton | 24d71f2f01 | |
David Stainton | 8a5c01cdab | |
David Stainton | 5fe1e13120 | |
David Stainton | bdb4527d85 | |
David Stainton | 5d154d2b7a | |
David Stainton | 239a8653af | |
David Stainton | 10264dd988 | |
David Stainton | 4db3adbbd1 | |
David Stainton | d1d43d72da | |
David Stainton | df1c93629d | |
David Stainton | a5c0907580 | |
David Stainton | a76d3546bc | |
David Stainton | 6490bc838b | |
David Stainton | 9378c5f1d4 | |
David Stainton | 378756d130 | |
David Stainton | ea48edf682 | |
David Stainton | 6046abbb39 | |
David Stainton | a2ed2965e3 | |
David Stainton | a3d79bcd6d | |
David Stainton | 035f7cf55d | |
David Stainton | e8de9c3299 | |
David Stainton | 868c658d05 | |
David Stainton | d384d4edca | |
David Stainton | 14603bce7c | |
David Stainton | 1e710187c4 | |
David Stainton | 9deae9b587 | |
David Stainton | 1223cd5610 | |
David Stainton | 632e5e1db0 | |
David Stainton | 2e96671a48 | |
David Stainton | bf34685ae0 | |
David Stainton | 331dd8bd9d | |
David Stainton | cf38deebf7 | |
David Stainton | 646fc94cb2 | |
David Stainton | 8f3c04ab8c | |
David Stainton | b448905b50 | |
David Stainton | 68fac1ca24 | |
David Stainton | a845fe6da3 | |
David Stainton | 7c937eabe7 | |
David Stainton | 8b20399796 | |
Daira Hopwood | e68b09b081 | |
Daira Hopwood | 3120499069 | |
Daira Hopwood | 567cabc195 | |
Daira Hopwood | d2950aaf62 | |
David Stainton | 5a8503a730 | |
David Stainton | 899d8f5742 | |
Daira Hopwood | 1ce31cc9a7 | |
Daira Hopwood | 1ef48fe868 | |
Daira Hopwood | b19eb94c7e | |
Daira Hopwood | c27eafcb5c | |
Daira Hopwood | aaed7ee8d7 | |
Daira Hopwood | 7802d3e350 | |
Daira Hopwood | a2cb04fe7d | |
Daira Hopwood | 3cffe2599e | |
Daira Hopwood | 3854501e4b | |
Daira Hopwood | cd0b360912 | |
David Stainton | 184a716a14 | |
David Stainton | abe591e5f8 | |
David Stainton | 364526fd66 | |
David Stainton | 6aefeb2ea7 | |
David Stainton | 1a5726eda4 | |
David Stainton | 9c4b780c35 | |
David Stainton | 036b586cfd | |
Daira Hopwood | f6fbc7255b | |
Daira Hopwood | 7223c2821e | |
David Stainton | 91dab93b3b | |
Daira Hopwood | c5916adacd | |
Daira Hopwood | d4a708d16e | |
Daira Hopwood | ceb84605b9 | |
Daira Hopwood | ae7808c212 | |
Daira Hopwood | a9ecbba380 | |
Daira Hopwood | 39c8b97503 | |
Daira Hopwood | 164b23c399 | |
Daira Hopwood | 67239ffa9c | |
Daira Hopwood | 1607724c59 | |
Daira Hopwood | 9c78062548 | |
Daira Hopwood | 5153dff291 | |
Daira Hopwood | b37478195f | |
@@ -430,9 +430,9 @@ SFTP, FTP
 
 Drop-Upload
 
-As of Tahoe-LAFS v1.9.0, a node running on Linux can be configured to
-automatically upload files that are created or changed in a specified
-local directory. See drop-upload.rst_ for details.
+A node running on Linux or Windows can be configured to automatically
+upload files that are created or changed in a specified local directory.
+See `drop-upload.rst`_ for details.
 
 .. _download-status.rst: frontends/download-status.rst
 .. _CLI.rst: frontends/CLI.rst
@@ -1,8 +1,8 @@
 .. -*- coding: utf-8-with-signature -*-
 
-===============================
-Tahoe-LAFS Drop-Upload Frontend
-===============================
+================================
+Tahoe-LAFS Magic Folder Frontend
+================================
 
 1. `Introduction`_
 2. `Configuration`_
@@ -12,19 +12,16 @@ Tahoe-LAFS Drop-Upload Frontend
 Introduction
 ============
 
-The drop-upload frontend allows an upload to a Tahoe-LAFS grid to be triggered
+The Magic Folder frontend allows an upload to a Tahoe-LAFS grid to be triggered
 automatically whenever a file is created or changed in a specific local
-directory. This is a preview of a feature that we expect to support across
-several platforms, but it currently works only on Linux.
+directory. It currently works on Linux and Windows.
 
 The implementation was written as a prototype at the First International
 Tahoe-LAFS Summit in June 2011, and is not currently in as mature a state as
 the other frontends (web, CLI, SFTP and FTP). This means that you probably
-should not keep important data in the upload directory, and should not rely
-on all changes to files in the local directory to result in successful uploads.
-There might be (and have been) incompatible changes to how the feature is
-configured. There is even the possibility that it may be abandoned, for
-example if unsolveable reliability issues are found.
+should not rely on all changes to files in the local directory to result in
+successful uploads. There might be (and have been) incompatible changes to
+how the feature is configured.
 
 We are very interested in feedback on how well this feature works for you, and
 suggestions to improve its usability, functionality, and reliability.
@@ -33,18 +30,18 @@ suggestions to improve its usability, functionality, and reliability.
 Configuration
 =============
 
-The drop-upload frontend runs as part of a gateway node. To set it up, you
+The Magic Folder frontend runs as part of a gateway node. To set it up, you
 need to choose the local directory to monitor for file changes, and a mutable
 directory on the grid to which files will be uploaded.
 
-These settings are configured in the ``[drop_upload]`` section of the
+These settings are configured in the ``[magic_folder]`` section of the
 gateway's ``tahoe.cfg`` file.
 
-``[drop_upload]``
+``[magic_folder]``
 
 ``enabled = (boolean, optional)``
 
-    If this is ``True``, drop-upload will be enabled. The default value is
+    If this is ``True``, Magic Folder will be enabled. The default value is
     ``False``.
 
 ``local.directory = (UTF-8 path)``
@@ -54,10 +51,11 @@ gateway's ``tahoe.cfg`` file.
     in UTF-8 regardless of the system's filesystem encoding. Relative paths
     will be interpreted starting from the node's base directory.
 
-In addition, the file ``private/drop_upload_dircap`` must contain a
-writecap pointing to an existing mutable directory to be used as the target
-of uploads. It will start with ``URI:DIR2:``, and cannot include an alias
-or path.
+In addition:
+* the file ``private/magic_folder_dircap`` must contain a writecap pointing
+  to an existing mutable directory to be used as the target of uploads.
+  It will start with ``URI:DIR2:``, and cannot include an alias or path.
+* the file ``private/collective_dircap`` must contain a readcap
 
 After setting the above fields and starting or restarting the gateway,
 you can confirm that the feature is working by copying a file into the
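For concreteness, here is a minimal sketch of the files this configuration touches, assuming a hypothetical node base directory and placeholder cap strings (real caps come from creating the directories on the grid; normally you would edit ``tahoe.cfg`` and the ``private/`` files by hand):

    # Illustrative sketch only; paths and cap values are placeholders.
    import os.path

    basedir = os.path.expanduser("~/.tahoe")

    with open(os.path.join(basedir, "tahoe.cfg"), "a") as f:
        f.write("[magic_folder]\n"
                "enabled = True\n"
                "local.directory = /home/alice/magic\n")

    # Each cap file holds just the cap string, with no alias or path:
    with open(os.path.join(basedir, "private", "magic_folder_dircap"), "w") as f:
        f.write("URI:DIR2:xxxx:yyyy\n")     # writecap (placeholder)
    with open(os.path.join(basedir, "private", "collective_dircap"), "w") as f:
        f.write("URI:DIR2-RO:xxxx:yyyy\n")  # readcap (placeholder)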
@@ -77,9 +75,8 @@ page and the node log_ may be helpful to determine the cause of any failures.
 Known Issues and Limitations
 ============================
 
-This frontend only works on Linux. There is an even-more-experimental
-implementation for Windows (`#1431`_), and a ticket to add support for
-Mac OS X and BSD-based systems (`#1432`_).
+This frontend only works on Linux and Windows. There is a ticket to add
+support for Mac OS X and BSD-based systems (`#1432`_).
 
 Subdirectories of the local directory are not monitored. If a subdirectory
 is created, it will be ignored. (`#1433`_)
@@ -95,13 +92,13 @@ The only way to determine whether uploads have failed is to look at the
 'Operational Statistics' page linked from the Welcome page. This only shows
 a count of failures, not the names of files. Uploads are never retried.
 
-The drop-upload frontend performs its uploads sequentially (i.e. it waits
+The Magic Folder frontend performs its uploads sequentially (i.e. it waits
 until each upload is finished before starting the next), even when there
 would be enough memory and bandwidth to efficiently perform them in parallel.
-A drop-upload can occur in parallel with an upload by a different frontend,
-though. (`#1459`_)
+A Magic Folder upload can occur in parallel with an upload by a different
+frontend, though. (`#1459`_)
 
-If there are a large number of near-simultaneous file creation or
+On Linux, if there are a large number of near-simultaneous file creation or
 change events (greater than the number specified in the file
 ``/proc/sys/fs/inotify/max_queued_events``), it is possible that some events
 could be missed. This is fairly unlikely under normal circumstances, because
@@ -109,6 +106,11 @@ the default value of ``max_queued_events`` in most Linux distributions is
 16384, and events are removed from this queue immediately without waiting for
 the corresponding upload to complete. (`#1430`_)
 
+The Windows implementation might also occasionally miss file creation or
+change events, due to limitations of the underlying Windows API
+(ReadDirectoryChangesW). We do not know how likely or unlikely this is.
+(`#1431`_)
+
 Some filesystems may not support the necessary change notifications.
 So, it is recommended for the local directory to be on a directly attached
 disk-based filesystem, not a network filesystem or one provided by a virtual
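As a rough illustration of the Linux failure mode described above, the kernel's queue limit can be read directly (a sketch, Linux-only; not project code):

    # If more inotify events than this are queued before the watcher drains
    # them, the kernel drops events and signals IN_Q_OVERFLOW instead.
    with open("/proc/sys/fs/inotify/max_queued_events") as f:
        max_queued = int(f.read())
    print "max_queued_events =", max_queued   # typically 16384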
@@ -125,8 +127,8 @@ up-to-date. (`#1440`_)
 Files deleted from the local directory will not be unlinked from the upload
 directory. (`#1710`_)
 
-The ``private/drop_upload_dircap`` file cannot use an alias or path to
-specify the upload directory. (`#1711`_)
+The ``private/magic_folder_dircap`` and ``private/collective_dircap`` files
+cannot use an alias or path to specify the upload directory. (`#1711`_)
 
 Files are always uploaded as immutable. If there is an existing mutable file
 of the same name in the upload directory, it will be unlinked and replaced
@@ -137,9 +139,16 @@ file), then the old file is still present on the grid, and any other caps to
 it will remain valid. See `docs/garbage-collection.rst`_ for how to reclaim
 the space used by files that are no longer needed.
 
-Unicode names are supported, but the local name of a file must be encoded
-correctly in order for it to be uploaded. The expected encoding is that
-printed by ``python -c "import sys; print sys.getfilesystemencoding()"``.
+Unicode filenames are supported on both Linux and Windows, but on Linux, the
+local name of a file must be encoded correctly in order for it to be uploaded.
+The expected encoding is that printed by
+``python -c "import sys; print sys.getfilesystemencoding()"``.
+
+On Windows, local directories with non-ASCII names are not currently working.
+(`#2219`_)
+
+On Windows, when a node has Magic Folder enabled, it is unresponsive to Ctrl-C
+(it can only be killed using Task Manager or similar). (`#2218`_)
 
 .. _`#1105`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1105
 .. _`#1430`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1430
@@ -153,6 +162,8 @@ printed by ``python -c "import sys; print sys.getfilesystemencoding()"``.
 .. _`#1710`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1710
 .. _`#1711`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1711
 .. _`#1712`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1712
+.. _`#2218`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2218
+.. _`#2219`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2219
 
 .. _docs/garbage-collection.rst: ../garbage-collection.rst
 
@@ -12,28 +12,28 @@ from allmydata.util.dbutil import get_db, DBError
 DAY = 24*60*60
 MONTH = 30*DAY
 
-SCHEMA_v1 = """
-CREATE TABLE version -- added in v1
+MAIN_SCHEMA = """
+CREATE TABLE version
 (
- version INTEGER  -- contains one row, set to 2
+ version INTEGER  -- contains one row, set to %s
 );
 
-CREATE TABLE local_files -- added in v1
+CREATE TABLE local_files
 (
  path VARCHAR(1024) PRIMARY KEY, -- index, this is an absolute UTF-8-encoded local filename
  size INTEGER, -- os.stat(fn)[stat.ST_SIZE]
  mtime NUMBER, -- os.stat(fn)[stat.ST_MTIME]
 ctime NUMBER, -- os.stat(fn)[stat.ST_CTIME]
- fileid INTEGER
+ fileid INTEGER%s
 );
 
-CREATE TABLE caps -- added in v1
+CREATE TABLE caps
 (
  fileid INTEGER PRIMARY KEY AUTOINCREMENT,
  filecap VARCHAR(256) UNIQUE -- URI:CHK:...
 );
 
-CREATE TABLE last_upload -- added in v1
+CREATE TABLE last_upload
 (
  fileid INTEGER PRIMARY KEY,
  last_uploaded TIMESTAMP,
@@ -42,6 +42,8 @@ CREATE TABLE last_upload -- added in v1
 
 """
 
+SCHEMA_v1 = MAIN_SCHEMA % (1, "")
+
 TABLE_DIRECTORY = """
 
 CREATE TABLE directories -- added in v2
@@ -54,7 +56,7 @@ CREATE TABLE directories -- added in v2
 
 """
 
-SCHEMA_v2 = SCHEMA_v1 + TABLE_DIRECTORY
+SCHEMA_v2 = MAIN_SCHEMA % (2, "") + TABLE_DIRECTORY
 
 UPDATE_v1_to_v2 = TABLE_DIRECTORY + """
 UPDATE version SET version=2;
@@ -64,6 +66,10 @@ UPDATERS = {
     2: UPDATE_v1_to_v2,
 }
 
+
+SCHEMA_v3 = MAIN_SCHEMA % (3, ",\nversion INTEGER\n") + TABLE_DIRECTORY
+
+
 def get_backupdb(dbfile, stderr=sys.stderr,
                  create_version=(SCHEMA_v2, 2), just_create=False):
     # Open or create the given backupdb file. The parent directory must
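The effect of the two ``%s`` slots in ``MAIN_SCHEMA`` is worth spelling out: the first is the schema version number recorded in the ``version`` table, and the second is an optional extra column spliced onto ``local_files``. A sketch of expanding the template by hand (mirrors the definitions above):

    from allmydata.backupdb import MAIN_SCHEMA, TABLE_DIRECTORY

    schema_v1 = MAIN_SCHEMA % (1, "")
    schema_v3 = MAIN_SCHEMA % (3, ",\nversion INTEGER\n") + TABLE_DIRECTORY
    assert "set to 1" in schema_v1          # version table row will say 1
    assert "set to 3" in schema_v3          # version table row will say 3
    assert "version INTEGER" in schema_v3   # local_files gains a per-file version column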
@@ -71,7 +77,13 @@ def get_backupdb(dbfile, stderr=sys.stderr,
     try:
         (sqlite3, db) = get_db(dbfile, stderr, create_version, updaters=UPDATERS,
                                just_create=just_create, dbname="backupdb")
-        return BackupDB_v2(sqlite3, db)
+        if create_version[1] in (1, 2):
+            return BackupDB(sqlite3, db)
+        elif create_version[1] == 3:
+            return MagicFolderDB(sqlite3, db)
+        else:
+            print >>stderr, "invalid db schema version specified"
+            return None
     except DBError, e:
         print >>stderr, e
         return None
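A caller selects the magic-folder variant by passing the v3 schema; this is exactly what MagicFolder.__init__ does later in this branch:

    # Sketch: create/open a v3 db; returns a MagicFolderDB (or None on error).
    from allmydata import backupdb

    db = backupdb.get_backupdb("magicfolderdb.sqlite",
                               create_version=(backupdb.SCHEMA_v3, 3))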
@@ -127,7 +139,7 @@ class DirectoryResult:
         self.bdb.did_check_directory_healthy(self.dircap, results)
 
 
-class BackupDB_v2:
+class BackupDB:
     VERSION = 2
     NO_CHECK_BEFORE = 1*MONTH
     ALWAYS_CHECK_AFTER = 2*MONTH
@@ -137,6 +149,21 @@ class BackupDB_v2:
         self.connection = connection
         self.cursor = connection.cursor()
 
+    def check_file_db_exists(self, path):
+        """I will tell you if a given file has an entry in my database or not
+        by returning True or False.
+        """
+        c = self.cursor
+        c.execute("SELECT size,mtime,ctime,fileid"
+                  " FROM local_files"
+                  " WHERE path=?",
+                  (path,))
+        row = self.cursor.fetchone()
+        if not row:
+            return False
+        else:
+            return True
+
     def check_file(self, path, use_timestamps=True):
         """I will tell you if a given local file needs to be uploaded or not,
         by looking in a database and seeing if I have a record of this file
@@ -336,3 +363,80 @@
                   " WHERE dircap=?",
                   (now, dircap))
         self.connection.commit()
+
+
+class MagicFolderDB(BackupDB):
+    VERSION = 3
+
+    def get_all_files(self):
+        """Retrieve a list of all files that have had an entry in the magic-folder db
+        (files that have been downloaded at least once).
+        """
+        self.cursor.execute("SELECT path FROM local_files")
+        rows = self.cursor.fetchall()
+        if not rows:
+            return None
+        else:
+            return rows
+
+    def get_local_file_version(self, path):
+        """I will tell you the version of a local file tracked by our magic folder db.
+        If no db entry is found then I'll return None.
+        """
+        c = self.cursor
+        c.execute("SELECT version, fileid"
+                  " FROM local_files"
+                  " WHERE path=?",
+                  (path,))
+        row = self.cursor.fetchone()
+        if not row:
+            return None
+        else:
+            return row[0]
+
+    def did_upload_file(self, filecap, path, version, mtime, ctime, size):
+        #print "_did_upload_file(%r, %r, %r, %r, %r, %r)" % (filecap, path, version, mtime, ctime, size)
+        now = time.time()
+        fileid = self.get_or_allocate_fileid_for_cap(filecap)
+        try:
+            self.cursor.execute("INSERT INTO last_upload VALUES (?,?,?)",
+                                (fileid, now, now))
+        except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
+            self.cursor.execute("UPDATE last_upload"
+                                " SET last_uploaded=?, last_checked=?"
+                                " WHERE fileid=?",
+                                (now, now, fileid))
+        try:
+            self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?)",
+                                (path, size, mtime, ctime, fileid, version))
+        except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
+            self.cursor.execute("UPDATE local_files"
+                                " SET size=?, mtime=?, ctime=?, fileid=?, version=?"
+                                " WHERE path=?",
+                                (size, mtime, ctime, fileid, version, path))
+        self.connection.commit()
+
+    def is_new_file_time(self, path, relpath_u):
+        """I return True if the file's current statinfo (i.e. size, ctime, and
+        mtime) does not match the statinfo that was previously stored in the
+        db, i.e. the file is new or has changed.
+        """
+        #print "check_file_time %s %s" % (path, relpath_u)
+        path = abspath_expanduser_unicode(path)
+        s = os.stat(path)
+        size = s[stat.ST_SIZE]
+        ctime = s[stat.ST_CTIME]
+        mtime = s[stat.ST_MTIME]
+        c = self.cursor
+        c.execute("SELECT size,mtime,ctime,fileid"
+                  " FROM local_files"
+                  " WHERE path=?",
+                  (relpath_u,))
+        row = self.cursor.fetchone()
+        if not row:
+            return True
+        (last_size, last_mtime, last_ctime, last_fileid) = row
+        if (size, ctime, mtime) == (last_size, last_ctime, last_mtime):
+            return False
+        else:
+            return True
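Taken together, these methods give each tracked file a monotonically increasing version number. A sketch of the lifecycle (cap and path values are placeholders):

    # Sketch: version bookkeeping as implemented by MagicFolderDB above.
    from allmydata import backupdb

    db = backupdb.get_backupdb("magicfolderdb.sqlite",
                               create_version=(backupdb.SCHEMA_v3, 3))
    assert db.get_local_file_version(u"a/b.txt") is None   # never seen
    db.did_upload_file("URI:CHK:placeholder", u"a/b.txt",
                       version=0, mtime=0, ctime=0, size=3)
    assert db.get_local_file_version(u"a/b.txt") == 0
    # is_new_file_time() then compares the file's current os.stat() results
    # against the stored (size, mtime, ctime) to decide whether the next
    # upload should bump the version.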
@@ -129,7 +129,9 @@ class Client(node.Node, pollmixin.PollMixin):
     }
 
     def __init__(self, basedir="."):
+        #print "Client.__init__(%r)" % (basedir,)
         node.Node.__init__(self, basedir)
+        self.connected_enough_d = defer.Deferred()
         self.started_timestamp = time.time()
         self.logSource="Client"
         self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
@@ -150,7 +152,7 @@ class Client(node.Node, pollmixin.PollMixin):
         # ControlServer and Helper are attached after Tub startup
         self.init_ftp_server()
         self.init_sftp_server()
-        self.init_drop_uploader()
+        self.init_magic_folder()
 
         # If the node sees an exit_trigger file, it will poll every second to see
         # whether the file still exists, and what its mtime is. If the file does not
@@ -344,7 +346,12 @@ class Client(node.Node, pollmixin.PollMixin):
     def init_client_storage_broker(self):
         # create a StorageFarmBroker object, for use by Uploader/Downloader
         # (and everybody else who wants to use storage servers)
-        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
+        connection_threshold = min(self.encoding_params["k"],
+                                   self.encoding_params["happy"] + 1)
+
+        sb = storage_client.StorageFarmBroker(self.tub, True, connection_threshold,
+                                              self.connected_enough_d)
         self.storage_broker = sb
 
         # load static server specifications from tahoe.cfg, if any.
@@ -486,22 +493,31 @@ class Client(node.Node, pollmixin.PollMixin):
                                sftp_portstr, pubkey_file, privkey_file)
             s.setServiceParent(self)
 
-    def init_drop_uploader(self):
+    def init_magic_folder(self):
+        #print "init_magic_folder"
         if self.get_config("drop_upload", "enabled", False, boolean=True):
-            if self.get_config("drop_upload", "upload.dircap", None):
-                raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
-                                           "put the cap in a 'private/drop_upload_dircap' file, and delete this option.")
-
-            upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
-            local_dir_utf8 = self.get_config("drop_upload", "local.directory")
-
-            try:
-                from allmydata.frontends import drop_upload
-                s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
-                s.setServiceParent(self)
-                s.startService()
-            except Exception, e:
-                self.log("couldn't start drop-uploader: %r", args=(e,))
+            raise OldConfigOptionError("The [drop_upload] section must be renamed to [magic_folder].\n"
+                                       "See docs/frontends/magic-folder.rst for more information.")
+
+        if self.get_config("magic_folder", "enabled", False, boolean=True):
+            #print "magic folder enabled"
+            upload_dircap = self.get_private_config("magic_folder_dircap")
+            collective_dircap = self.get_private_config("collective_dircap")
+
+            local_dir_config = self.get_config("magic_folder", "local.directory").decode("utf-8")
+            local_dir = abspath_expanduser_unicode(local_dir_config, base=self.basedir)
+
+            dbfile = os.path.join(self.basedir, "private", "magicfolderdb.sqlite")
+            dbfile = abspath_expanduser_unicode(dbfile)
+
+            from allmydata.frontends import magic_folder
+
+            s = magic_folder.MagicFolder(self, upload_dircap, collective_dircap, local_dir, dbfile)
+            s.setServiceParent(self)
+            s.startService()
+
+            # start processing the upload queue when we've connected to enough servers
+            self.connected_enough_d.addCallback(s.ready)
 
     def _check_exit_trigger(self, exit_trigger_file):
         if os.path.exists(exit_trigger_file):
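The ``connected_enough_d`` wiring above acts as a gate: the MagicFolder service starts watching immediately, but queue processing (``s.ready``) begins only once the StorageFarmBroker has seen ``min(k, happy+1)`` connected servers. A minimal sketch of the pattern, not project code:

    from twisted.internet import defer

    connected_enough_d = defer.Deferred()

    def ready(ign):
        # corresponds to MagicFolder.ready(): start scanning the queues
        return "scanning started"

    connected_enough_d.addCallback(ready)
    # ...later, when the broker counts enough connected servers:
    connected_enough_d.callback(None)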
@@ -1,124 +0,0 @@
-
-import sys
-
-from twisted.internet import defer
-from twisted.python.filepath import FilePath
-from twisted.application import service
-from foolscap.api import eventually
-
-from allmydata.interfaces import IDirectoryNode
-
-from allmydata.util.encodingutil import quote_output, get_filesystem_encoding
-from allmydata.util.fileutil import abspath_expanduser_unicode
-from allmydata.immutable.upload import FileName
-
-
-class DropUploader(service.MultiService):
-    name = 'drop-upload'
-
-    def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
-        service.MultiService.__init__(self)
-
-        try:
-            local_dir_u = abspath_expanduser_unicode(local_dir_utf8.decode('utf-8'))
-            if sys.platform == "win32":
-                local_dir = local_dir_u
-            else:
-                local_dir = local_dir_u.encode(get_filesystem_encoding())
-        except (UnicodeEncodeError, UnicodeDecodeError):
-            raise AssertionError("The '[drop_upload] local.directory' parameter %s was not valid UTF-8 or "
-                                 "could not be represented in the filesystem encoding."
-                                 % quote_output(local_dir_utf8))
-
-        self._client = client
-        self._stats_provider = client.stats_provider
-        self._convergence = client.convergence
-        self._local_path = FilePath(local_dir)
-
-        if inotify is None:
-            from twisted.internet import inotify
-        self._inotify = inotify
-
-        if not self._local_path.exists():
-            raise AssertionError("The '[drop_upload] local.directory' parameter was %s but there is no directory at that location." % quote_output(local_dir_u))
-        if not self._local_path.isdir():
-            raise AssertionError("The '[drop_upload] local.directory' parameter was %s but the thing at that location is not a directory." % quote_output(local_dir_u))
-
-        # TODO: allow a path rather than a cap URI.
-        self._parent = self._client.create_node_from_uri(upload_dircap)
-        if not IDirectoryNode.providedBy(self._parent):
-            raise AssertionError("The URI in 'private/drop_upload_dircap' does not refer to a directory.")
-        if self._parent.is_unknown() or self._parent.is_readonly():
-            raise AssertionError("The URI in 'private/drop_upload_dircap' is not a writecap to a directory.")
-
-        self._uploaded_callback = lambda ign: None
-
-        self._notifier = inotify.INotify()
-
-        # We don't watch for IN_CREATE, because that would cause us to read and upload a
-        # possibly-incomplete file before the application has closed it. There should always
-        # be an IN_CLOSE_WRITE after an IN_CREATE (I think).
-        # TODO: what about IN_MOVE_SELF or IN_UNMOUNT?
-        mask = inotify.IN_CLOSE_WRITE | inotify.IN_MOVED_TO | inotify.IN_ONLYDIR
-        self._notifier.watch(self._local_path, mask=mask, callbacks=[self._notify])
-
-    def startService(self):
-        service.MultiService.startService(self)
-        d = self._notifier.startReading()
-        self._stats_provider.count('drop_upload.dirs_monitored', 1)
-        return d
-
-    def _notify(self, opaque, path, events_mask):
-        self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
-        self._stats_provider.count('drop_upload.files_queued', 1)
-        eventually(self._process, opaque, path, events_mask)
-
-    def _process(self, opaque, path, events_mask):
-        d = defer.succeed(None)
-
-        # FIXME: if this already exists as a mutable file, we replace the directory entry,
-        # but we should probably modify the file (as the SFTP frontend does).
-        def _add_file(ign):
-            name = path.basename()
-            # on Windows the name is already Unicode
-            if not isinstance(name, unicode):
-                name = name.decode(get_filesystem_encoding())
-
-            u = FileName(path.path, self._convergence)
-            return self._parent.add_file(name, u)
-        d.addCallback(_add_file)
-
-        def _succeeded(ign):
-            self._stats_provider.count('drop_upload.files_queued', -1)
-            self._stats_provider.count('drop_upload.files_uploaded', 1)
-        def _failed(f):
-            self._stats_provider.count('drop_upload.files_queued', -1)
-            if path.exists():
-                self._log("drop-upload: %r failed to upload due to %r" % (path.path, f))
-                self._stats_provider.count('drop_upload.files_failed', 1)
-                return f
-            else:
-                self._log("drop-upload: notified file %r disappeared "
-                          "(this is normal for temporary files): %r" % (path.path, f))
-                self._stats_provider.count('drop_upload.files_disappeared', 1)
-                return None
-        d.addCallbacks(_succeeded, _failed)
-        d.addBoth(self._uploaded_callback)
-        return d
-
-    def set_uploaded_callback(self, callback):
-        """This sets a function that will be called after a file has been uploaded."""
-        self._uploaded_callback = callback
-
-    def finish(self, for_tests=False):
-        self._notifier.stopReading()
-        self._stats_provider.count('drop_upload.dirs_monitored', -1)
-        if for_tests and hasattr(self._notifier, 'wait_until_stopped'):
-            return self._notifier.wait_until_stopped()
-        else:
-            return defer.succeed(None)
-
-    def _log(self, msg):
-        self._client.log(msg)
-        #open("events", "ab+").write(msg)
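The deleted DropUploader above serialized work only via ``eventually()``, scheduling one ``_process`` call per inotify event. The magic_folder.py file added next replaces this with a deque drained through a single "lazy tail" Deferred chain (see QueueMixin below). A minimal sketch of that idiom, not project code:

    from collections import deque
    from twisted.internet import defer

    pending = deque()
    lazy_tail = defer.succeed(None)   # tail of one serial Deferred chain

    def process(item):
        return item                   # stand-in for the real upload work

    def turn(ign=None):
        if pending:
            item = pending.pop()
            lazy_tail.addCallback(lambda ign: process(item))
            lazy_tail.addCallback(turn)

    pending.append(u"some/path")
    turn()   # items are processed strictly one at a time, in chain order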
@ -0,0 +1,602 @@
|
||||||
|
|
||||||
|
import sys, os, stat
|
||||||
|
import os.path
|
||||||
|
from collections import deque
|
||||||
|
import time
|
||||||
|
|
||||||
|
from twisted.internet import defer, reactor, task
|
||||||
|
from twisted.python.failure import Failure
|
||||||
|
from twisted.python import runtime
|
||||||
|
from twisted.application import service
|
||||||
|
|
||||||
|
from allmydata.util import fileutil
|
||||||
|
from allmydata.interfaces import IDirectoryNode
|
||||||
|
from allmydata.util import log
|
||||||
|
from allmydata.util.fileutil import precondition_abspath, get_pathinfo, abspath_expanduser_unicode
|
||||||
|
from allmydata.util.assertutil import precondition
|
||||||
|
from allmydata.util.deferredutil import HookMixin
|
||||||
|
from allmydata.util.encodingutil import listdir_unicode, to_filepath, \
|
||||||
|
unicode_from_filepath, quote_local_unicode_path, FilenameEncodingError
|
||||||
|
from allmydata.immutable.upload import FileName, Data
|
||||||
|
from allmydata import backupdb, magicpath
|
||||||
|
|
||||||
|
|
||||||
|
IN_EXCL_UNLINK = 0x04000000L
|
||||||
|
|
||||||
|
def get_inotify_module():
|
||||||
|
try:
|
||||||
|
if sys.platform == "win32":
|
||||||
|
from allmydata.windows import inotify
|
||||||
|
elif runtime.platform.supportsINotify():
|
||||||
|
from twisted.internet import inotify
|
||||||
|
else:
|
||||||
|
raise NotImplementedError("filesystem notification needed for drop-upload is not supported.\n"
|
||||||
|
"This currently requires Linux or Windows.")
|
||||||
|
return inotify
|
||||||
|
except (ImportError, AttributeError) as e:
|
||||||
|
log.msg(e)
|
||||||
|
if sys.platform == "win32":
|
||||||
|
raise NotImplementedError("filesystem notification needed for drop-upload is not supported.\n"
|
||||||
|
"Windows support requires at least Vista, and has only been tested on Windows 7.")
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
class MagicFolder(service.MultiService):
|
||||||
|
name = 'magic-folder'
|
||||||
|
|
||||||
|
def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile,
|
||||||
|
pending_delay=1.0):
|
||||||
|
precondition_abspath(local_path_u)
|
||||||
|
|
||||||
|
service.MultiService.__init__(self)
|
||||||
|
|
||||||
|
db = backupdb.get_backupdb(dbfile, create_version=(backupdb.SCHEMA_v3, 3))
|
||||||
|
if db is None:
|
||||||
|
return Failure(Exception('ERROR: Unable to load magic folder db.'))
|
||||||
|
|
||||||
|
# for tests
|
||||||
|
self._client = client
|
||||||
|
self._db = db
|
||||||
|
|
||||||
|
self.is_ready = False
|
||||||
|
|
||||||
|
self.uploader = Uploader(client, local_path_u, db, upload_dircap, pending_delay)
|
||||||
|
self.downloader = Downloader(client, local_path_u, db, collective_dircap)
|
||||||
|
|
||||||
|
def startService(self):
|
||||||
|
# TODO: why is this being called more than once?
|
||||||
|
if self.running:
|
||||||
|
return defer.succeed(None)
|
||||||
|
#print "%r.startService" % (self,)
|
||||||
|
service.MultiService.startService(self)
|
||||||
|
return self.uploader.start_monitoring()
|
||||||
|
|
||||||
|
def ready(self):
|
||||||
|
"""ready is used to signal us to start
|
||||||
|
processing the upload and download items...
|
||||||
|
"""
|
||||||
|
self.is_ready = True
|
||||||
|
d = self.uploader.start_scanning()
|
||||||
|
d2 = self.downloader.start_scanning()
|
||||||
|
d.addCallback(lambda ign: d2)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def finish(self):
|
||||||
|
#print "finish"
|
||||||
|
d = self.uploader.stop()
|
||||||
|
d2 = self.downloader.stop()
|
||||||
|
d.addCallback(lambda ign: d2)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def remove_service(self):
|
||||||
|
return service.MultiService.disownServiceParent(self)
|
||||||
|
|
||||||
|
|
||||||
|
class QueueMixin(HookMixin):
|
||||||
|
def __init__(self, client, local_path_u, db, name):
|
||||||
|
self._client = client
|
||||||
|
self._local_path_u = local_path_u
|
||||||
|
self._local_path = to_filepath(local_path_u)
|
||||||
|
self._db = db
|
||||||
|
self._name = name
|
||||||
|
self._hooks = {'processed': None, 'started': None}
|
||||||
|
self.started_d = self.set_hook('started')
|
||||||
|
|
||||||
|
if not self._local_path.exists():
|
||||||
|
raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
|
||||||
|
"but there is no directory at that location."
|
||||||
|
% quote_local_unicode_path(self._local_path_u))
|
||||||
|
if not self._local_path.isdir():
|
||||||
|
raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
|
||||||
|
"but the thing at that location is not a directory."
|
||||||
|
% quote_local_unicode_path(self._local_path_u))
|
||||||
|
|
||||||
|
self._deque = deque()
|
||||||
|
self._lazy_tail = defer.succeed(None)
|
||||||
|
self._pending = set()
|
||||||
|
self._stopped = False
|
||||||
|
self._turn_delay = 0
|
||||||
|
|
||||||
|
def _count(self, counter_name, delta=1):
|
||||||
|
self._client.stats_provider.count('magic_folder.%s.%s' % (self._name, counter_name), delta)
|
||||||
|
|
||||||
|
def _log(self, msg):
|
||||||
|
s = "Magic Folder %s: %s" % (self._name, msg)
|
||||||
|
self._client.log(s)
|
||||||
|
#print s
|
||||||
|
#open("events", "ab+").write(msg)
|
||||||
|
|
||||||
|
def _append_to_deque(self, path):
|
||||||
|
if path in self._pending:
|
||||||
|
return
|
||||||
|
self._deque.append(path)
|
||||||
|
self._pending.add(path)
|
||||||
|
self._count('objects_queued')
|
||||||
|
if self.is_ready:
|
||||||
|
reactor.callLater(0, self._turn_deque)
|
||||||
|
|
||||||
|
def _turn_deque(self):
|
||||||
|
if self._stopped:
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
item = self._deque.pop()
|
||||||
|
except IndexError:
|
||||||
|
self._log("deque is now empty")
|
||||||
|
self._lazy_tail.addCallback(lambda ign: self._when_queue_is_empty())
|
||||||
|
else:
|
||||||
|
self._lazy_tail.addCallback(lambda ign: self._process(item))
|
||||||
|
self._lazy_tail.addBoth(self._call_hook, 'processed')
|
||||||
|
self._lazy_tail.addErrback(log.err)
|
||||||
|
self._lazy_tail.addCallback(lambda ign: task.deferLater(reactor, self._turn_delay, self._turn_deque))
|
||||||
|
|
||||||
|
|
||||||
|
class Uploader(QueueMixin):
|
||||||
|
def __init__(self, client, local_path_u, db, upload_dircap, pending_delay):
|
||||||
|
QueueMixin.__init__(self, client, local_path_u, db, 'uploader')
|
||||||
|
|
||||||
|
self.is_ready = False
|
||||||
|
|
||||||
|
# TODO: allow a path rather than a cap URI.
|
||||||
|
self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
|
||||||
|
if not IDirectoryNode.providedBy(self._upload_dirnode):
|
||||||
|
raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
|
||||||
|
if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
|
||||||
|
raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
|
||||||
|
|
||||||
|
self._inotify = get_inotify_module()
|
||||||
|
self._notifier = self._inotify.INotify()
|
||||||
|
|
||||||
|
if hasattr(self._notifier, 'set_pending_delay'):
|
||||||
|
self._notifier.set_pending_delay(pending_delay)
|
||||||
|
|
||||||
|
# We don't watch for IN_CREATE, because that would cause us to read and upload a
|
||||||
|
# possibly-incomplete file before the application has closed it. There should always
|
||||||
|
# be an IN_CLOSE_WRITE after an IN_CREATE (I think).
|
||||||
|
# TODO: what about IN_MOVE_SELF, IN_MOVED_FROM, or IN_UNMOUNT?
|
||||||
|
#
|
||||||
|
self.mask = ( self._inotify.IN_CLOSE_WRITE
|
||||||
|
| self._inotify.IN_MOVED_TO
|
||||||
|
| self._inotify.IN_MOVED_FROM
|
||||||
|
| self._inotify.IN_DELETE
|
||||||
|
| self._inotify.IN_ONLYDIR
|
||||||
|
| IN_EXCL_UNLINK
|
||||||
|
)
|
||||||
|
self._notifier.watch(self._local_path, mask=self.mask, callbacks=[self._notify],
|
||||||
|
recursive=True)
|
||||||
|
|
||||||
|
def start_monitoring(self):
|
||||||
|
self._log("start_monitoring")
|
||||||
|
d = defer.succeed(None)
|
||||||
|
d.addCallback(lambda ign: self._notifier.startReading())
|
||||||
|
d.addCallback(lambda ign: self._count('dirs_monitored'))
|
||||||
|
d.addBoth(self._call_hook, 'started')
|
||||||
|
return d
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self._log("stop")
|
||||||
|
self._notifier.stopReading()
|
||||||
|
self._count('dirs_monitored', -1)
|
||||||
|
if hasattr(self._notifier, 'wait_until_stopped'):
|
||||||
|
d = self._notifier.wait_until_stopped()
|
||||||
|
else:
|
||||||
|
d = defer.succeed(None)
|
||||||
|
d.addCallback(lambda ign: self._lazy_tail)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def start_scanning(self):
|
||||||
|
self._log("start_scanning")
|
||||||
|
self.is_ready = True
|
||||||
|
all_files = self._db.get_all_files()
|
||||||
|
d = self._scan(self._local_path_u)
|
||||||
|
self._turn_deque()
|
||||||
|
return d
|
||||||
|
|
||||||
|
def _scan(self, local_path_u): # XXX should this take a FilePath?
|
||||||
|
self._log("scan %r" % (local_path_u))
|
||||||
|
if not os.path.isdir(local_path_u):
|
||||||
|
raise AssertionError("Programmer error: _scan() must be passed a directory path.")
|
||||||
|
quoted_path = quote_local_unicode_path(local_path_u)
|
||||||
|
try:
|
||||||
|
children = listdir_unicode(local_path_u)
|
||||||
|
except EnvironmentError:
|
||||||
|
raise(Exception("WARNING: magic folder: permission denied on directory %s" % (quoted_path,)))
|
||||||
|
except FilenameEncodingError:
|
||||||
|
raise(Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error" % (quoted_path,)))
|
||||||
|
|
||||||
|
d = defer.succeed(None)
|
||||||
|
for child in children:
|
||||||
|
assert isinstance(child, unicode), child
|
||||||
|
d.addCallback(lambda ign, child=child: os.path.join(local_path_u, child))
|
||||||
|
d.addCallback(self._process_child)
|
||||||
|
d.addErrback(log.err)
|
||||||
|
|
||||||
|
return d
|
||||||
|
|
||||||
|
def _notify(self, opaque, path, events_mask):
|
||||||
|
self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
|
||||||
|
path_u = unicode_from_filepath(path)
|
||||||
|
self._append_to_deque(path_u)
|
||||||
|
|
||||||
|
def _when_queue_is_empty(self):
|
||||||
|
return defer.succeed(None)
|
||||||
|
|
||||||
|
def _process_child(self, path_u):
|
||||||
|
precondition(isinstance(path_u, unicode), path_u)
|
||||||
|
|
||||||
|
pathinfo = get_pathinfo(path_u)
|
||||||
|
|
||||||
|
if pathinfo.islink:
|
||||||
|
self.warn("WARNING: cannot backup symlink %s" % quote_local_unicode_path(path_u))
|
||||||
|
return None
|
||||||
|
elif pathinfo.isdir:
|
||||||
|
# process directories unconditionally
|
||||||
|
self._append_to_deque(path_u)
|
||||||
|
|
||||||
|
# recurse on the child directory
|
||||||
|
return self._scan(path_u)
|
||||||
|
elif pathinfo.isfile:
|
||||||
|
file_version = self._db.get_local_file_version(path_u)
|
||||||
|
if file_version is None:
|
||||||
|
# XXX upload if we didn't record our version in magicfolder db?
|
||||||
|
self._append_to_deque(path_u)
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
d2 = self._get_collective_latest_file(path_u)
|
||||||
|
def _got_latest_file((file_node, metadata)):
|
||||||
|
collective_version = metadata['version']
|
||||||
|
if collective_version is None:
|
||||||
|
return None
|
||||||
|
if file_version > collective_version:
|
||||||
|
self._append_to_upload_deque(path_u)
|
||||||
|
elif file_version < collective_version: # FIXME Daira thinks this is wrong
|
||||||
|
# if a collective version of the file is newer than ours
|
||||||
|
# we must download it and unlink the old file from our upload dirnode
|
||||||
|
self._append_to_download_deque(path_u)
|
||||||
|
# XXX where should we save the returned deferred?
|
||||||
|
return self._upload_dirnode.delete(path_u, must_be_file=True)
|
||||||
|
else:
|
||||||
|
# XXX same version. do nothing.
|
||||||
|
pass
|
||||||
|
d2.addCallback(_got_latest_file)
|
||||||
|
return d2
|
||||||
|
else:
|
||||||
|
self.warn("WARNING: cannot backup special file %s" % quote_local_unicode_path(path_u))
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _process(self, path_u):
|
||||||
|
precondition(isinstance(path_u, unicode), path_u)
|
||||||
|
|
||||||
|
d = defer.succeed(None)
|
||||||
|
|
||||||
|
def _maybe_upload(val):
|
||||||
|
pathinfo = get_pathinfo(path_u)
|
||||||
|
|
||||||
|
self._pending.remove(path_u) # FIXME make _upload_pending hold relative paths
|
||||||
|
relpath_u = os.path.relpath(path_u, self._local_path_u)
|
||||||
|
encoded_name_u = magicpath.path2magic(relpath_u)
|
||||||
|
|
||||||
|
if not pathinfo.exists:
|
||||||
|
self._log("drop-upload: notified object %r disappeared "
|
||||||
|
"(this is normal for temporary objects)" % (path_u,))
|
||||||
|
self._count('objects_disappeared')
|
||||||
|
d2 = defer.succeed(None)
|
||||||
|
if self._db.check_file_db_exists(relpath_u):
|
||||||
|
d2.addCallback(lambda ign: self._get_metadata(encoded_name_u))
|
||||||
|
current_version = self._db.get_local_file_version(relpath_u) + 1
|
||||||
|
def set_deleted(metadata):
|
||||||
|
metadata['version'] = current_version
|
                        metadata['deleted'] = True
                        empty_uploadable = Data("", self._client.convergence)
                        return self._upload_dirnode.add_file(encoded_name_u, empty_uploadable, overwrite=True, metadata=metadata)
                    d2.addCallback(set_deleted)

                    def add_db_entry(filenode):
                        filecap = filenode.get_uri()
                        size = 0
                        now = time.time()
                        ctime = now
                        mtime = now
                        self._db.did_upload_file(filecap, relpath_u, current_version, int(mtime), int(ctime), size)
                        self._count('files_uploaded')
                    d2.addCallback(lambda x: self._get_filenode(encoded_name_u))
                    d2.addCallback(add_db_entry)

                d2.addCallback(lambda x: Exception("file does not exist"))  # FIXME wrong
                return d2
            elif pathinfo.islink:
                self.warn("WARNING: cannot upload symlink %s" % quote_local_unicode_path(path_u))
                return None
            elif pathinfo.isdir:
                self._notifier.watch(to_filepath(path_u), mask=self.mask, callbacks=[self._notify], recursive=True)
                uploadable = Data("", self._client.convergence)
                encoded_name_u += u"@_"
                upload_d = self._upload_dirnode.add_file(encoded_name_u, uploadable, metadata={"version":0}, overwrite=True)
                def _succeeded(ign):
                    self._log("created subdirectory %r" % (path_u,))
                    self._count('directories_created')
                def _failed(f):
                    self._log("failed to create subdirectory %r" % (path_u,))
                    return f
                upload_d.addCallbacks(_succeeded, _failed)
                upload_d.addCallback(lambda ign: self._scan(path_u))
                return upload_d
            elif pathinfo.isfile:
                version = self._db.get_local_file_version(relpath_u)
                if version is None:
                    version = 0
                else:
                    if self._db.is_new_file_time(os.path.join(self._local_path_u, relpath_u), relpath_u):
                        version += 1

                uploadable = FileName(path_u, self._client.convergence)
                d2 = self._upload_dirnode.add_file(encoded_name_u, uploadable, metadata={"version":version}, overwrite=True)
                def add_db_entry(filenode):
                    filecap = filenode.get_uri()
                    # XXX maybe just pass pathinfo
                    self._db.did_upload_file(filecap, relpath_u, version,
                                             pathinfo.mtime, pathinfo.ctime, pathinfo.size)
                    self._count('files_uploaded')
                d2.addCallback(add_db_entry)
                return d2
            else:
                self.warn("WARNING: cannot process special file %s" % quote_local_unicode_path(path_u))
                return None

        d.addCallback(_maybe_upload)

        def _succeeded(res):
            self._count('objects_queued', -1)
            self._count('objects_succeeded')
            return res
        def _failed(f):
            self._count('objects_queued', -1)
            self._count('objects_failed')
            self._log("%r while processing %r" % (f, path_u))
            return f
        d.addCallbacks(_succeeded, _failed)
        return d

    def _get_metadata(self, encoded_name_u):
        try:
            d = self._upload_dirnode.get_metadata_for(encoded_name_u)
        except KeyError:
            return Failure()
        return d

    def _get_filenode(self, encoded_name_u):
        try:
            d = self._upload_dirnode.get(encoded_name_u)
        except KeyError:
            return Failure()
        return d

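The isfile branch above implements a simple per-file version counter: a file not yet known to the magic-folder db starts at version 0, and a file whose timestamps look newer than the recorded ones gets its version bumped before upload. A toy standalone sketch of that rule (the dict and the is_newer flag are hypothetical stand-ins for the db calls):

    def next_version(db, relpath_u, is_newer):
        # db: dict mapping relative path -> last uploaded version; a
        # hypothetical stand-in for the magic-folder db's
        # get_local_file_version().
        v = db.get(relpath_u)
        if v is None:
            return 0          # first time we see this file
        if is_newer:
            return v + 1      # local timestamps say the file changed
        return v              # unchanged: keep the same version

    db = {}
    print next_version(db, u"a.txt", False)   # 0
    db[u"a.txt"] = 0
    print next_version(db, u"a.txt", True)    # 1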
class Downloader(QueueMixin):
    def __init__(self, client, local_path_u, db, collective_dircap):
        QueueMixin.__init__(self, client, local_path_u, db, 'downloader')

        # TODO: allow a path rather than a cap URI.
        self._collective_dirnode = self._client.create_node_from_uri(collective_dircap)

        if not IDirectoryNode.providedBy(self._collective_dirnode):
            raise AssertionError("The URI in 'private/collective_dircap' does not refer to a directory.")
        if self._collective_dirnode.is_unknown() or not self._collective_dirnode.is_readonly():
            raise AssertionError("The URI in 'private/collective_dircap' is not a readonly cap to a directory.")

        self._turn_delay = 3  # delay between remote scans
        self._download_scan_batch = {}  # path -> [(filenode, metadata)]

    def start_scanning(self):
        self._log("start_scanning")
        files = self._db.get_all_files()
        self._log("all files %s" % files)

        d = self._scan_remote_collective()
        self._turn_deque()
        return d

    def stop(self):
        self._stopped = True
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._lazy_tail)
        return d

    def _should_download(self, relpath_u, remote_version):
        """
        _should_download returns a bool indicating whether or not a remote object should be downloaded.
        We check the remote metadata version against our magic-folder db version number;
        the latest version wins.
        """
        v = self._db.get_local_file_version(relpath_u)
        return (v is None or v < remote_version)

    def _get_local_latest(self, path_u):
        """
        _get_local_latest takes a unicode path string and checks whether the corresponding
        file object exists in our magic-folder db; if not, it returns None, otherwise it
        returns the version number recorded in the db for that file.
        """
        if not os.path.exists(os.path.join(self._local_path_u, path_u)):
            return None
        return self._db.get_local_file_version(path_u)

    def _get_collective_latest_file(self, filename):
        """
        _get_collective_latest_file takes a file path pointing to a file managed by
        magic-folder and returns a deferred that fires with a two-tuple containing the
        file node and metadata for the latest version of the file located in the
        magic-folder collective directory.
        """
        collective_dirmap_d = self._collective_dirnode.list()
        def scan_collective(result):
            list_of_deferreds = []
            for dir_name in result.keys():
                # XXX make sure it's a directory
                d = defer.succeed(None)
                d.addCallback(lambda x, dir_name=dir_name: result[dir_name][0].get_child_and_metadata(filename))
                list_of_deferreds.append(d)
            deferList = defer.DeferredList(list_of_deferreds, consumeErrors=True)
            return deferList
        collective_dirmap_d.addCallback(scan_collective)
        def highest_version(deferredList):
            max_version = 0
            metadata = None
            node = None
            for success, result in deferredList:
                if success:
                    if result[1]['version'] > max_version:
                        node, metadata = result
                        max_version = result[1]['version']
            return node, metadata
        collective_dirmap_d.addCallback(highest_version)
        return collective_dirmap_d

    def _append_to_batch(self, name, file_node, metadata):
        if self._download_scan_batch.has_key(name):
            self._download_scan_batch[name] += [(file_node, metadata)]
        else:
            self._download_scan_batch[name] = [(file_node, metadata)]

    def _scan_remote(self, nickname, dirnode):
        self._log("_scan_remote nickname %r" % (nickname,))
        d = dirnode.list()
        def scan_listing(listing_map):
            for name in listing_map.keys():
                file_node, metadata = listing_map[name]
                local_version = self._get_local_latest(name)
                remote_version = metadata.get('version', None)
                self._log("%r has local version %r, remote version %r" % (name, local_version, remote_version))
                if local_version is None or remote_version is None or local_version < remote_version:
                    self._log("added to download queue")
                    self._append_to_batch(name, file_node, metadata)
        d.addCallback(scan_listing)
        return d

    def _scan_remote_collective(self):
        self._log("_scan_remote_collective")
        self._download_scan_batch = {}  # XXX

        if self._collective_dirnode is None:
            return
        collective_dirmap_d = self._collective_dirnode.list()
        def do_list(result):
            others = [x for x in result.keys()]
            return result, others
        collective_dirmap_d.addCallback(do_list)
        def scan_collective(result):
            d = defer.succeed(None)
            collective_dirmap, others_list = result
            for dir_name in others_list:
                d.addCallback(lambda x, dir_name=dir_name: self._scan_remote(dir_name, collective_dirmap[dir_name][0]))
                # XXX todo add errback
            return d
        collective_dirmap_d.addCallback(scan_collective)
        collective_dirmap_d.addCallback(self._filter_scan_batch)
        collective_dirmap_d.addCallback(self._add_batch_to_download_queue)
        return collective_dirmap_d

    def _add_batch_to_download_queue(self, result):
        self._deque.extend(result)
        self._pending.update(map(lambda x: x[0], result))

    def _filter_scan_batch(self, result):
        extension = []  # consider whether this should be a dict
        for name in self._download_scan_batch.keys():
            if name in self._pending:
                continue
            file_node, metadata = max(self._download_scan_batch[name], key=lambda x: x[1]['version'])
            if self._should_download(name, metadata['version']):
                extension += [(name, file_node, metadata)]
        return extension

    def _when_queue_is_empty(self):
        d = task.deferLater(reactor, self._turn_delay, self._scan_remote_collective)
        d.addCallback(lambda ign: self._turn_deque())
        return d

    def _process(self, item):
        (name, file_node, metadata) = item
        d = file_node.download_best_version()
        def succeeded(res):
            d2 = defer.succeed(res)
            absname = abspath_expanduser_unicode(name, base=self._local_path_u)
            d2.addCallback(lambda result: self._write_downloaded_file(absname, result, is_conflict=False))
            def do_update_db(full_path):
                filecap = file_node.get_uri()
                try:
                    s = os.stat(full_path)
                except OSError:
                    raise Exception("downloaded file %s disappeared" % full_path)
                size = s[stat.ST_SIZE]
                ctime = s[stat.ST_CTIME]
                mtime = s[stat.ST_MTIME]
                self._db.did_upload_file(filecap, name, metadata['version'], mtime, ctime, size)
            d2.addCallback(do_update_db)
            # XXX handle failure here with addErrback...
            self._count('objects_downloaded')
            return d2
        def failed(f):
            self._log("download failed: %s" % (str(f),))
            self._count('objects_download_failed')
            return f
        d.addCallbacks(succeeded, failed)
        def remove_from_pending(res):
            self._pending.remove(name)
            return res
        d.addBoth(remove_from_pending)
        return d

    FUDGE_SECONDS = 10.0

    @classmethod
    def _write_downloaded_file(cls, path, file_contents, is_conflict=False, now=None):
        # 1. Write a temporary file, say .foo.tmp.
        # 2. is_conflict determines whether this is an overwrite or a conflict.
        # 3. Set the mtime of the replacement file to be T seconds before the
        #    current local time.
        # 4. Perform a file replacement with backup filename foo.backup,
        #    replaced file foo, and replacement file .foo.tmp. If any step of
        #    this operation fails, reclassify as a conflict and stop.
        #
        # Returns the path of the destination file.

        precondition(isinstance(path, unicode), path=path)

        replacement_path = path + u".tmp"  # FIXME more unique
        backup_path = path + u".backup"
        if now is None:
            now = time.time()

        fileutil.write(replacement_path, file_contents)
        os.utime(replacement_path, (now, now - cls.FUDGE_SECONDS))
        if is_conflict:
            return cls._rename_conflicted_file(path, replacement_path)
        else:
            try:
                fileutil.replace_file(path, replacement_path, backup_path)
                return path
            except fileutil.ConflictError:
                return cls._rename_conflicted_file(path, replacement_path)

    @classmethod
    def _rename_conflicted_file(cls, path, replacement_path):
        conflict_path = path + u".conflict"
        fileutil.rename_no_overwrite(replacement_path, conflict_path)
        return conflict_path

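_write_downloaded_file spells out the overwrite-versus-conflict rule in its numbered comments: write the new bytes to a temporary sibling, backdate its mtime by FUDGE_SECONDS so a concurrent local edit still compares as newer, then swap it into place, falling back to a .conflict rename when the swap detects interference. A rough standalone sketch of the same idea using only the standard library (this is not the Tahoe fileutil API, and the simple rename dance below is not atomic on all platforms):

    import os, time

    FUDGE_SECONDS = 10.0

    def write_downloaded_file(path, contents, now=None):
        # Hypothetical simplification of Downloader._write_downloaded_file.
        tmp = path + ".tmp"
        backup = path + ".backup"
        if now is None:
            now = time.time()
        f = open(tmp, "wb")
        f.write(contents)
        f.close()
        # Backdate the mtime so that a concurrent local edit of `path`
        # will compare as newer than this downloaded version.
        os.utime(tmp, (now, now - FUDGE_SECONDS))
        try:
            if os.path.exists(path):
                os.rename(path, backup)   # keep the old version as a backup
            os.rename(tmp, path)
            return path
        except OSError:
            conflict = path + ".conflict"
            os.rename(tmp, conflict)      # reclassify as a conflict
            return conflict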
@ -0,0 +1,27 @@

import re
import os.path

from allmydata.util.assertutil import precondition

def path2magic(path):
    return re.sub(ur'[/@]', lambda m: {u'/': u'@_', u'@': u'@@'}[m.group(0)], path)

def magic2path(path):
    return re.sub(ur'@[_@]', lambda m: {u'@_': u'/', u'@@': u'@'}[m.group(0)], path)


IGNORE_SUFFIXES = [u'.backup', u'.tmp', u'.conflicted']
IGNORE_PREFIXES = [u'.']

def should_ignore_file(path_u):
    precondition(isinstance(path_u, unicode), path_u=path_u)

    for suffix in IGNORE_SUFFIXES:
        if path_u.endswith(suffix):
            return True
    while path_u != u"":
        path_u, tail_u = os.path.split(path_u)
        if tail_u.startswith(u"."):
            return True
    return False

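path2magic flattens a relative path into a single Tahoe child name by escaping '/' as '@_' and '@' as '@@'; magic2path inverts the escaping exactly, so the pair forms a lossless round trip. An illustration (assuming the new module lands as allmydata.magicpath):

    from allmydata import magicpath

    for p in [u"foo/bar.txt", u"a@b/c", u"@@weird@_name"]:
        encoded = magicpath.path2magic(p)
        assert magicpath.magic2path(encoded) == p

    print magicpath.path2magic(u"a@b/c")   # prints a@@b@_c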
@ -57,9 +57,14 @@ class BasedirOptions(BaseOptions):
     ]

     def parseArgs(self, basedir=None):
+        # This finds the node-directory option correctly even if we are in a subcommand.
+        root = self.parent
+        while root.parent is not None:
+            root = root.parent
+
-        if self.parent['node-directory'] and self['basedir']:
+        if root['node-directory'] and self['basedir']:
             raise usage.UsageError("The --node-directory (or -d) and --basedir (or -C) options cannot both be used.")
-        if self.parent['node-directory'] and basedir:
+        if root['node-directory'] and basedir:
             raise usage.UsageError("The --node-directory (or -d) option and a basedir argument cannot both be used.")
         if self['basedir'] and basedir:
             raise usage.UsageError("The --basedir (or -C) option and a basedir argument cannot both be used.")

@ -68,13 +73,14 @@ class BasedirOptions(BaseOptions):
             b = argv_to_abspath(basedir)
         elif self['basedir']:
             b = argv_to_abspath(self['basedir'])
-        elif self.parent['node-directory']:
-            b = argv_to_abspath(self.parent['node-directory'])
+        elif root['node-directory']:
+            b = argv_to_abspath(root['node-directory'])
         elif self.default_nodedir:
             b = self.default_nodedir
         else:
             raise usage.UsageError("No default basedir available, you must provide one with --node-directory, --basedir, or a basedir argument")
         self['basedir'] = b
+        self['node-directory'] = b

     def postOptions(self):
         if not self['basedir']:

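The root-walk added above is the usual way to reach the top-level twisted.python.usage Options object from arbitrarily nested subcommand options, since twisted sets `.parent` on each subOptions instance during parsing. A minimal illustration of the pattern (the class here is an invented stand-in):

    class FakeOptions(object):
        # Stand-in for twisted.python.usage.Options: twisted sets
        # subOptions.parent when it dispatches to a subcommand.
        def __init__(self, parent=None):
            self.parent = parent

    top = FakeOptions()
    sub = FakeOptions(parent=top)
    subsub = FakeOptions(parent=sub)

    root = subsub
    while root.parent is not None:
        root = root.parent
    assert root is top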
@ -0,0 +1,182 @@

import os
from cStringIO import StringIO
from twisted.python import usage

from .common import BaseOptions, BasedirOptions, get_aliases
from .cli import MakeDirectoryOptions, LnOptions, CreateAliasOptions
import tahoe_mv
from allmydata.util import fileutil
from allmydata import uri

INVITE_SEPARATOR = "+"

class CreateOptions(BasedirOptions):
    nickname = None
    localdir = None
    synopsis = "MAGIC_ALIAS: [NICKNAME LOCALDIR]"
    def parseArgs(self, alias, nickname=None, localdir=None):
        BasedirOptions.parseArgs(self)
        if not alias.endswith(':'):
            raise usage.UsageError("An alias must end with a ':' character.")
        self.alias = alias[:-1]
        self.nickname = nickname
        self.localdir = localdir
        if self.nickname and not self.localdir:
            raise usage.UsageError("If NICKNAME is specified then LOCALDIR must also be specified.")
        node_url_file = os.path.join(self['node-directory'], "node.url")
        self['node-url'] = fileutil.read(node_url_file).strip()

def _delegate_options(source_options, target_options):
    target_options.aliases = get_aliases(source_options['node-directory'])
    target_options["node-url"] = source_options["node-url"]
    target_options["node-directory"] = source_options["node-directory"]
    target_options.stdin = StringIO("")
    target_options.stdout = StringIO()
    target_options.stderr = StringIO()
    return target_options

def create(options):
    from allmydata.scripts import tahoe_add_alias
    create_alias_options = _delegate_options(options, CreateAliasOptions())
    create_alias_options.alias = options.alias

    rc = tahoe_add_alias.create_alias(create_alias_options)
    if rc != 0:
        print >>options.stderr, create_alias_options.stderr.getvalue()
        return rc
    print >>options.stdout, create_alias_options.stdout.getvalue()

    if options.nickname is not None:
        invite_options = _delegate_options(options, InviteOptions())
        invite_options.alias = options.alias
        invite_options.nickname = options.nickname
        rc = invite(invite_options)
        if rc != 0:
            print >>options.stderr, "magic-folder: failed to invite after create\n"
            print >>options.stderr, invite_options.stderr.getvalue()
            return rc
        invite_code = invite_options.stdout.getvalue().strip()

        join_options = _delegate_options(options, JoinOptions())
        join_options.invite_code = invite_code
        fields = invite_code.split(INVITE_SEPARATOR)
        if len(fields) != 2:
            raise usage.UsageError("Invalid invite code.")
        join_options.magic_readonly_cap, join_options.dmd_write_cap = fields
        join_options.local_dir = options.localdir
        rc = join(join_options)
        if rc != 0:
            print >>options.stderr, "magic-folder: failed to join after create\n"
            print >>options.stderr, join_options.stderr.getvalue()
            return rc
    return 0

class InviteOptions(BasedirOptions):
    nickname = None
    synopsis = "MAGIC_ALIAS: NICKNAME"
    stdin = StringIO("")
    def parseArgs(self, alias, nickname=None):
        BasedirOptions.parseArgs(self)
        if not alias.endswith(':'):
            raise usage.UsageError("An alias must end with a ':' character.")
        self.alias = alias[:-1]
        self.nickname = nickname
        node_url_file = os.path.join(self['node-directory'], "node.url")
        self['node-url'] = open(node_url_file, "r").read().strip()
        aliases = get_aliases(self['node-directory'])
        self.aliases = aliases

def invite(options):
    from allmydata.scripts import tahoe_mkdir
    mkdir_options = _delegate_options(options, MakeDirectoryOptions())
    mkdir_options.where = None

    rc = tahoe_mkdir.mkdir(mkdir_options)
    if rc != 0:
        print >>options.stderr, "magic-folder: failed to mkdir\n"
        return rc
    dmd_write_cap = mkdir_options.stdout.getvalue().strip()
    dmd_readonly_cap = unicode(uri.from_string(dmd_write_cap).get_readonly().to_string(), 'utf-8')
    if dmd_readonly_cap is None:
        print >>options.stderr, "magic-folder: failed to diminish dmd write cap\n"
        return 1

    magic_write_cap = get_aliases(options["node-directory"])[options.alias]
    magic_readonly_cap = unicode(uri.from_string(magic_write_cap).get_readonly().to_string(), 'utf-8')
    # tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME
    ln_options = _delegate_options(options, LnOptions())
    ln_options.from_file = dmd_readonly_cap
    ln_options.to_file = "%s/%s" % (magic_write_cap, options.nickname)
    rc = tahoe_mv.mv(ln_options, mode="link")
    if rc != 0:
        print >>options.stderr, "magic-folder: failed to create link\n"
        print >>options.stderr, ln_options.stderr.getvalue()
        return rc

    print >>options.stdout, "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap)
    return 0

class JoinOptions(BasedirOptions):
    synopsis = "INVITE_CODE LOCAL_DIR"
    dmd_write_cap = ""
    magic_readonly_cap = ""
    def parseArgs(self, invite_code, local_dir):
        BasedirOptions.parseArgs(self)
        self.local_dir = local_dir
        fields = invite_code.split(INVITE_SEPARATOR)
        if len(fields) != 2:
            raise usage.UsageError("Invalid invite code.")
        self.magic_readonly_cap, self.dmd_write_cap = fields

def join(options):
    dmd_cap_file = os.path.join(options["node-directory"], "private/magic_folder_dircap")
    collective_readcap_file = os.path.join(options["node-directory"], "private/collective_dircap")

    fileutil.write(dmd_cap_file, options.dmd_write_cap)
    fileutil.write(collective_readcap_file, options.magic_readonly_cap)
    fileutil.write(os.path.join(options["node-directory"], "tahoe.cfg"),
                   "[magic_folder]\nenabled = True\nlocal.directory = %s\n"
                   % (options.local_dir.encode('utf-8'),), mode="ab")
    return 0

class MagicFolderCommand(BaseOptions):
    subCommands = [
        ["create", None, CreateOptions, "Create a Magic Folder."],
        ["invite", None, InviteOptions, "Invite someone to a Magic Folder."],
        ["join", None, JoinOptions, "Join a Magic Folder."],
    ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe [global-options] magic SUBCOMMAND"
    def getUsage(self, width=None):
        t = BaseOptions.getUsage(self, width)
        t += """\
Please run e.g. 'tahoe magic-folder create --help' for more details on each
subcommand.
"""
        return t

subDispatch = {
    "create": create,
    "invite": invite,
    "join": join,
}

def do_magic_folder(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)

subCommands = [
    ["magic-folder", None, MagicFolderCommand,
     "Magic Folder subcommands: use 'tahoe magic-folder' for a list."],
]

dispatch = {
    "magic-folder": do_magic_folder,
}

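An invite code is just the collective's read-only cap and the invitee's personal DMD write cap joined with INVITE_SEPARATOR ('+'); `join` splits on that separator and writes each half into the node's private/ directory. A hedged illustration of the format (the caps shown are fake placeholders; real Tahoe caps are base32 and never contain '+'):

    INVITE_SEPARATOR = "+"

    # Fake caps, for illustration only:
    magic_readonly_cap = "URI:DIR2-RO:aaaa:1111"
    dmd_write_cap = "URI:DIR2:bbbb:2222"

    invite_code = "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap)

    fields = invite_code.split(INVITE_SEPARATOR)
    assert fields == [magic_readonly_cap, dmd_write_cap]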
@ -5,7 +5,8 @@ from cStringIO import StringIO
 from twisted.python import usage

 from allmydata.scripts.common import get_default_nodedir
-from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, admin
+from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, admin, \
+                              magic_folder_cli
 from allmydata.util.encodingutil import quote_output, quote_local_unicode_path, get_io_encoding

 def GROUP(s):

@ -45,6 +46,7 @@ class Options(usage.Options):
                     + debug.subCommands
                     + GROUP("Using the filesystem")
                     + cli.subCommands
+                    + magic_folder_cli.subCommands
                     )

     optFlags = [

@ -143,6 +145,8 @@ def runner(argv,
         rc = admin.dispatch[command](so)
     elif command in cli.dispatch:
         rc = cli.dispatch[command](so)
+    elif command in magic_folder_cli.dispatch:
+        rc = magic_folder_cli.dispatch[command](so)
     elif command in ac_dispatch:
         rc = ac_dispatch[command](so, stdout, stderr)
     else:

@ -8,7 +8,7 @@ from allmydata.scripts.common import get_alias, escape_path, DEFAULT_ALIAS, \
                                      UnknownAliasError
 from allmydata.scripts.common_http import do_http, HTTPError, format_http_error
 from allmydata.util import time_format
-from allmydata.scripts import backupdb
+from allmydata import backupdb
 from allmydata.util.encodingutil import listdir_unicode, quote_output, \
                                         quote_local_unicode_path, to_str, FilenameEncodingError, unicode_to_url
 from allmydata.util.assertutil import precondition

@ -151,9 +151,7 @@ def list(options):
             line.append(uri)
         if options["readonly-uri"]:
             line.append(quote_output(ro_uri or "-", quotemarks=False))

         rows.append((encoding_error, line))

     max_widths = []
     left_justifys = []
     for (encoding_error, row) in rows:

@ -62,10 +62,12 @@ class StorageFarmBroker:
     I'm also responsible for subscribing to the IntroducerClient to find out
     about new servers as they are announced by the Introducer.
     """
-    def __init__(self, tub, permute_peers):
+    def __init__(self, tub, permute_peers, connected_threshold, connected_d):
         self.tub = tub
         assert permute_peers # False not implemented yet
         self.permute_peers = permute_peers
+        self.connected_threshold = connected_threshold
+        self.connected_d = connected_d
         # self.servers maps serverid -> IServer, and keeps track of all the
         # storage servers that we've heard about. Each descriptor manages its
         # own Reconnector, and will give us a RemoteReference when we ask

@ -75,7 +77,7 @@ class StorageFarmBroker:

     # these two are used in unit tests
     def test_add_rref(self, serverid, rref, ann):
-        s = NativeStorageServer(serverid, ann.copy())
+        s = NativeStorageServer(serverid, ann.copy(), self)
         s.rref = rref
         s._is_connected = True
         self.servers[serverid] = s

@ -92,7 +94,7 @@ class StorageFarmBroker:
         precondition(isinstance(key_s, str), key_s)
         precondition(key_s.startswith("v0-"), key_s)
         assert ann["service-name"] == "storage"
-        s = NativeStorageServer(key_s, ann)
+        s = NativeStorageServer(key_s, ann, self)
         serverid = s.get_serverid()
         old = self.servers.get(serverid)
         if old:

@ -118,6 +120,13 @@ class StorageFarmBroker:
         for dsc in self.servers.values():
             dsc.try_to_connect()

+    def check_enough_connected(self):
+        if (self.connected_d is not None and
+            len(self.get_connected_servers()) >= self.connected_threshold):
+            d = self.connected_d
+            self.connected_d = None
+            d.callback(None)
+
     def get_servers_for_psi(self, peer_selection_index):
         # return a list of server objects (IServers)
         assert self.permute_peers == True

@ -187,9 +196,10 @@ class NativeStorageServer:
         "application-version": "unknown: no get_version()",
         }

-    def __init__(self, key_s, ann):
+    def __init__(self, key_s, ann, broker):
         self.key_s = key_s
         self.announcement = ann
+        self.broker = broker

         assert "anonymous-storage-FURL" in ann, ann
         furl = str(ann["anonymous-storage-FURL"])

@ -290,6 +300,7 @@ class NativeStorageServer:
         default = self.VERSION_DEFAULTS
         d = add_version_to_remote_reference(rref, default)
         d.addCallback(self._got_versioned_service, lp)
+        d.addCallback(lambda ign: self.broker.check_enough_connected())
         d.addErrback(log.err, format="storageclient._got_connection",
                      name=self.get_name(), umid="Sdq3pg")

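check_enough_connected implements a fire-once threshold: the broker holds a Deferred, and the first time the connected-server count reaches connected_threshold it clears the attribute before firing, so later connections cannot call back the same Deferred twice. A small standalone sketch of the pattern (the Broker class here is invented for illustration):

    from twisted.internet import defer

    class Broker(object):
        # Minimal sketch of the fire-once threshold in StorageFarmBroker.
        def __init__(self, threshold, connected_d):
            self.connected_threshold = threshold
            self.connected_d = connected_d
            self.connected = set()

        def server_connected(self, serverid):
            self.connected.add(serverid)
            if (self.connected_d is not None and
                len(self.connected) >= self.connected_threshold):
                d = self.connected_d
                self.connected_d = None   # clear first: fire at most once
                d.callback(None)

    d = defer.Deferred()
    d.addCallback(lambda ign: "enough servers")
    b = Broker(2, d)
    b.server_connected("a")
    b.server_connected("b")   # threshold reached here; d fires exactly once
    b.server_connected("c")   # no effect on the already-fired deferred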
@ -20,6 +20,9 @@ from twisted.internet import defer, reactor
 from twisted.python.failure import Failure
 from foolscap.api import Referenceable, fireEventually, RemoteException
 from base64 import b32encode
+
+from allmydata.util.assertutil import _assert
+
 from allmydata import uri as tahoe_uri
 from allmydata.client import Client
 from allmydata.storage.server import StorageServer, storage_index_to_dir

@ -174,6 +177,9 @@ class NoNetworkStorageBroker:
         return None

 class NoNetworkClient(Client):
+
+    def disownServiceParent(self):
+        self.disownServiceParent()
     def create_tub(self):
         pass
     def init_introducer_client(self):

@ -232,6 +238,7 @@ class NoNetworkGrid(service.MultiService):
         self.proxies_by_id = {} # maps to IServer on which .rref is a wrapped
                                 # StorageServer
         self.clients = []
+        self.client_config_hooks = client_config_hooks

         for i in range(num_servers):
             ss = self.make_server(i)

@ -239,30 +246,42 @@ class NoNetworkGrid(service.MultiService):
         self.rebuild_serverlist()

         for i in range(num_clients):
-            clientid = hashutil.tagged_hash("clientid", str(i))[:20]
-            clientdir = os.path.join(basedir, "clients",
-                                     idlib.shortnodeid_b2a(clientid))
-            fileutil.make_dirs(clientdir)
-            f = open(os.path.join(clientdir, "tahoe.cfg"), "w")
-            f.write("[node]\n")
-            f.write("nickname = client-%d\n" % i)
-            f.write("web.port = tcp:0:interface=127.0.0.1\n")
-            f.write("[storage]\n")
-            f.write("enabled = false\n")
-            f.close()
-
-            c = None
-            if i in client_config_hooks:
-                # this hook can either modify tahoe.cfg, or return an
-                # entirely new Client instance
-                c = client_config_hooks[i](clientdir)
-            if not c:
-                c = NoNetworkClient(clientdir)
-                c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
-            c.nodeid = clientid
-            c.short_nodeid = b32encode(clientid).lower()[:8]
-            c._servers = self.all_servers # can be updated later
-            c.setServiceParent(self)
-            self.clients.append(c)
+            c = self.make_client(i)
+            self.clients.append(c)
+
+    def make_client(self, i, write_config=True):
+        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
+        clientdir = os.path.join(self.basedir, "clients",
+                                 idlib.shortnodeid_b2a(clientid))
+        fileutil.make_dirs(clientdir)
+
+        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
+        if write_config:
+            f = open(tahoe_cfg_path, "w")
+            f.write("[node]\n")
+            f.write("nickname = client-%d\n" % i)
+            f.write("web.port = tcp:0:interface=127.0.0.1\n")
+            f.write("[storage]\n")
+            f.write("enabled = false\n")
+            f.close()
+        else:
+            _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)
+
+        c = None
+        if i in self.client_config_hooks:
+            # this hook can either modify tahoe.cfg, or return an
+            # entirely new Client instance
+            c = self.client_config_hooks[i](clientdir)
+
+        if not c:
+            c = NoNetworkClient(clientdir)
+            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
+
+        c.nodeid = clientid
+        c.short_nodeid = b32encode(clientid).lower()[:8]
+        c._servers = self.all_servers # can be updated later
+        c.setServiceParent(self)
+        return c

     def make_server(self, i, readonly=False):
         serverid = hashutil.tagged_hash("serverid", str(i))[:20]

@ -350,6 +369,9 @@ class GridTestMixin:
                                num_servers=num_servers,
                                client_config_hooks=client_config_hooks)
         self.g.setServiceParent(self.s)
+        self._record_webports_and_baseurls()
+
+    def _record_webports_and_baseurls(self):
         self.client_webports = [c.getServiceNamed("webish").getPortnum()
                                 for c in self.g.clients]
         self.client_baseurls = [c.getServiceNamed("webish").getURL()

@ -358,6 +380,23 @@ class GridTestMixin:
     def get_clientdir(self, i=0):
         return self.g.clients[i].basedir

+    def set_clientdir(self, basedir, i=0):
+        self.g.clients[i].basedir = basedir
+
+    def get_client(self, i=0):
+        return self.g.clients[i]
+
+    def restart_client(self, i=0):
+        client = self.g.clients[i]
+        d = defer.succeed(None)
+        d.addCallback(lambda ign: self.g.removeService(client))
+        def _make_client(ign):
+            c = self.g.make_client(i, write_config=False)
+            self.g.clients[i] = c
+            self._record_webports_and_baseurls()
+        d.addCallback(_make_client)
+        return d
+
     def get_serverdir(self, i):
         return self.g.servers_by_number[i].storedir

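restart_client above follows a generic twisted service-restart pattern: detach the old client service from its MultiService parent, then build a replacement that reuses the existing on-disk configuration (write_config=False) instead of rewriting it. A hedged sketch of the bare pattern, outside the test harness:

    from twisted.application import service
    from twisted.internet import defer

    def restart_service(parent, old, make_new):
        # Detach `old` (removeService may return a Deferred while the
        # service stops), then attach a freshly built replacement.
        d = defer.maybeDeferred(parent.removeService, old)
        def _make(ign):
            new = make_new()
            new.setServiceParent(parent)
            return new
        d.addCallback(_make)
        return d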
@ -6,7 +6,7 @@ from twisted.trial import unittest
 from allmydata.util import fileutil
 from allmydata.util.encodingutil import listdir_unicode, get_filesystem_encoding, unicode_platform
 from allmydata.util.assertutil import precondition
-from allmydata.scripts import backupdb
+from allmydata import backupdb

 class BackupDB(unittest.TestCase):
     def create(self, dbfile):

@ -22,7 +22,7 @@ class FakeClient:
 class WebResultsRendering(unittest.TestCase, WebRenderingMixin):

     def create_fake_client(self):
-        sb = StorageFarmBroker(None, True)
+        sb = StorageFarmBroker(None, True, 0, None)
         # s.get_name() (the "short description") will be "v0-00000000".
         # s.get_longname() will include the -long suffix.
         # s.get_peerid() (i.e. tubid) will be "aaa.." or "777.." or "ceir.."

@ -41,7 +41,7 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
             "my-version": "ver",
             "oldest-supported": "oldest",
             }
-        s = NativeStorageServer(key_s, ann)
+        s = NativeStorageServer(key_s, ann, sb)
         sb.test_add_server(peerid, s) # XXX: maybe use key_s?
         c = FakeClient()
         c.storage_broker = sb

@ -49,8 +49,11 @@ def parse_options(basedir, command, args):

 class CLITestMixin(ReallyEqualMixin):
     def do_cli(self, verb, *args, **kwargs):
+        # client_num is used to execute client CLI commands on a specific client.
+        client_num = kwargs.get("client_num", 0)
+
         nodeargs = [
-            "--node-directory", self.get_clientdir(),
+            "--node-directory", self.get_clientdir(i=client_num),
         ]
         argv = nodeargs + [verb] + list(args)
         stdin = kwargs.get("stdin", "")

@ -11,7 +11,8 @@ from allmydata.util import fileutil
 from allmydata.util.fileutil import abspath_expanduser_unicode
 from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv
 from allmydata.util.namespace import Namespace
-from allmydata.scripts import cli, backupdb
+from allmydata.scripts import cli
+from allmydata import backupdb
 from .common_util import StallMixin
 from .no_network import GridTestMixin
 from .test_cli import CLITestMixin, parse_options

@ -0,0 +1,205 @@

import os.path
import re

from twisted.trial import unittest
from twisted.internet import defer

from allmydata.util import fileutil
from allmydata.scripts.common import get_aliases
from allmydata.test.no_network import GridTestMixin
from .test_cli import CLITestMixin
from allmydata.scripts import magic_folder_cli
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.frontends.magic_folder import MagicFolder
from allmydata import uri


class MagicFolderCLITestMixin(CLITestMixin, GridTestMixin):

    def do_create_magic_folder(self, client_num):
        d = self.do_cli("magic-folder", "create", "magic:", client_num=client_num)
        def _done((rc, stdout, stderr)):
            self.failUnlessEqual(rc, 0)
            self.failUnlessIn("Alias 'magic' created", stdout)
            self.failUnlessEqual(stderr, "")
            aliases = get_aliases(self.get_clientdir(i=client_num))
            self.failUnlessIn("magic", aliases)
            self.failUnless(aliases["magic"].startswith("URI:DIR2:"))
        d.addCallback(_done)
        return d

    def do_invite(self, client_num, nickname):
        d = self.do_cli("magic-folder", "invite", u"magic:", nickname, client_num=client_num)
        def _done((rc, stdout, stderr)):
            self.failUnless(rc == 0)
            return (rc, stdout, stderr)
        d.addCallback(_done)
        return d

    def do_join(self, client_num, local_dir, invite_code):
        magic_readonly_cap, dmd_write_cap = invite_code.split(magic_folder_cli.INVITE_SEPARATOR)
        d = self.do_cli("magic-folder", "join", invite_code, local_dir, client_num=client_num)
        def _done((rc, stdout, stderr)):
            self.failUnless(rc == 0)
            return (rc, stdout, stderr)
        d.addCallback(_done)
        return d

    def check_joined_config(self, client_num, upload_dircap):
        """Tests that our collective directory has the readonly cap of
        our upload directory.
        """
        collective_readonly_cap = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "private/collective_dircap"))
        d = self.do_cli("ls", "--json", collective_readonly_cap, client_num=client_num)
        def _done((rc, stdout, stderr)):
            self.failUnless(rc == 0)
            return (rc, stdout, stderr)
        d.addCallback(_done)
        def test_joined_magic_folder((rc, stdout, stderr)):
            readonly_cap = unicode(uri.from_string(upload_dircap).get_readonly().to_string(), 'utf-8')
            s = re.search(readonly_cap, stdout)
            self.failUnless(s is not None)
            return None
        d.addCallback(test_joined_magic_folder)
        return d

    def get_caps_from_files(self, client_num):
        collective_dircap = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "private/collective_dircap"))
        upload_dircap = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "private/magic_folder_dircap"))
        self.failIf(collective_dircap is None or upload_dircap is None)
        return collective_dircap, upload_dircap

    def check_config(self, client_num, local_dir):
        client_config = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "tahoe.cfg"))
        # XXX utf-8?
        local_dir = local_dir.encode('utf-8')
        ret = re.search("\[magic_folder\]\nenabled = True\nlocal.directory = %s" % (local_dir,), client_config)
        self.failIf(ret is None)

    def create_invite_join_magic_folder(self, nickname, local_dir):
        d = self.do_cli("magic-folder", "create", u"magic:", nickname, local_dir)
        def _done((rc, stdout, stderr)):
            self.failUnless(rc == 0)
            return (rc, stdout, stderr)
        d.addCallback(_done)
        def get_alice_caps(x):
            client = self.get_client()
            self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
            self.collective_dirnode = client.create_node_from_uri(self.collective_dircap)
            self.upload_dirnode = client.create_node_from_uri(self.upload_dircap)
        d.addCallback(get_alice_caps)
        d.addCallback(lambda x: self.check_joined_config(0, self.upload_dircap))
        d.addCallback(lambda x: self.check_config(0, local_dir))
        return d

    def cleanup(self, res):
        #print "cleanup", res
        d = defer.succeed(None)
        if self.magicfolder is not None:
            d.addCallback(lambda ign: self.magicfolder.finish())
        d.addCallback(lambda ign: res)
        return d

    def init_magicfolder(self, client_num, upload_dircap, collective_dircap, local_magic_dir):
        dbfile = abspath_expanduser_unicode(u"magicfolderdb.sqlite", base=self.get_clientdir(i=client_num))
        magicfolder = MagicFolder(self.get_client(client_num), upload_dircap, collective_dircap, local_magic_dir,
                                  dbfile, pending_delay=0.2)
        magicfolder.setServiceParent(self.get_client(client_num))
        magicfolder.ready()
        return magicfolder

    def setup_alice_and_bob(self):
        self.set_up_grid(num_clients=2)

        alice_magic_dir = abspath_expanduser_unicode(u"Alice-magic", base=self.basedir)
        self.mkdir_nonascii(alice_magic_dir)
        bob_magic_dir = abspath_expanduser_unicode(u"Bob-magic", base=self.basedir)
        self.mkdir_nonascii(bob_magic_dir)

        # Alice creates a Magic Folder,
        # invites herself and then joins.
        d = self.do_create_magic_folder(0)
        d.addCallback(lambda x: self.do_invite(0, u"Alice\u00F8"))
        def get_invitecode(result):
            self.invitecode = result[1].strip()
        d.addCallback(get_invitecode)
        d.addCallback(lambda x: self.do_join(0, alice_magic_dir, self.invitecode))
        def get_alice_caps(x):
            self.alice_collective_dircap, self.alice_upload_dircap = self.get_caps_from_files(0)
        d.addCallback(get_alice_caps)
        d.addCallback(lambda x: self.check_joined_config(0, self.alice_upload_dircap))
        d.addCallback(lambda x: self.check_config(0, alice_magic_dir))
        def get_Alice_magicfolder(result):
            self.alice_magicfolder = self.init_magicfolder(0, self.alice_upload_dircap, self.alice_collective_dircap, alice_magic_dir)
            return result
        d.addCallback(get_Alice_magicfolder)

        # Alice invites Bob. Bob joins.
        d.addCallback(lambda x: self.do_invite(0, u"Bob\u00F8"))
        def get_invitecode(result):
            self.invitecode = result[1].strip()
        d.addCallback(get_invitecode)
        d.addCallback(lambda x: self.do_join(1, bob_magic_dir, self.invitecode))
        def get_bob_caps(x):
            self.bob_collective_dircap, self.bob_upload_dircap = self.get_caps_from_files(1)
        d.addCallback(get_bob_caps)
        d.addCallback(lambda x: self.check_joined_config(1, self.bob_upload_dircap))
        d.addCallback(lambda x: self.check_config(1, bob_magic_dir))
        def get_Bob_magicfolder(result):
            self.bob_magicfolder = self.init_magicfolder(1, self.bob_upload_dircap, self.bob_collective_dircap, bob_magic_dir)
            return result
        d.addCallback(get_Bob_magicfolder)

        def prepare_result(result):
            # XXX improve this
            return (self.alice_collective_dircap, self.alice_upload_dircap, self.alice_magicfolder,
                    self.bob_collective_dircap, self.bob_upload_dircap, self.bob_magicfolder)
        d.addCallback(prepare_result)
        return d


class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):

    def test_create_and_then_invite_join(self):
        self.basedir = "cli/MagicFolder/create-and-then-invite-join"
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, "magic")
        d = self.do_create_magic_folder(0)
        d.addCallback(lambda x: self.do_invite(0, u"Alice"))
        def get_invite((rc, stdout, stderr)):
            self.invite_code = stdout.strip()
        d.addCallback(get_invite)
        d.addCallback(lambda x: self.do_join(0, self.local_dir, self.invite_code))
        def get_caps(x):
            self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
        d.addCallback(get_caps)
        d.addCallback(lambda x: self.check_joined_config(0, self.upload_dircap))
        d.addCallback(lambda x: self.check_config(0, self.local_dir))
        return d

    def test_create_error(self):
        self.basedir = "cli/MagicFolder/create-error"
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, "magic")
        d = self.do_cli("magic-folder", "create", "m a g i c:", client_num=0)
        def _done((rc, stdout, stderr)):
            self.failIfEqual(rc, 0)
            self.failUnlessIn("Alias names cannot contain spaces.", stderr)
        d.addCallback(_done)
        return d

    def test_create_invite_join(self):
        self.basedir = "cli/MagicFolder/create-invite-join"
        self.set_up_grid()
        self.local_dir = os.path.join(self.basedir, "magic")
        d = self.do_cli("magic-folder", "create", u"magic:", u"Alice", self.local_dir)
        def _done((rc, stdout, stderr)):
            self.failUnless(rc == 0)
            return (rc, stdout, stderr)
        d.addCallback(_done)
        def get_caps(x):
            self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
        d.addCallback(get_caps)
        d.addCallback(lambda x: self.check_joined_config(0, self.upload_dircap))
        d.addCallback(lambda x: self.check_config(0, self.local_dir))
        return d

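In outline, the flow these test helpers drive through do_cli is the same one a user would type at the shell; the paths and nicknames below are placeholders, and the argument shapes come from the subcommand synopses above:

    tahoe magic-folder create magic: Alice ./magic-dir   # create alias, then invite and join in one step
    tahoe magic-folder invite magic: Bob                 # prints READCAP+WRITECAP, the invite code
    tahoe magic-folder join INVITE_CODE ./magic-dir      # writes the caps into private/ and enables [magic_folder] in tahoe.cfg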
@ -4,7 +4,7 @@ from twisted.trial import unittest
|
||||||
from twisted.application import service
|
from twisted.application import service
|
||||||
|
|
||||||
import allmydata
|
import allmydata
|
||||||
import allmydata.frontends.drop_upload
|
import allmydata.frontends.magic_folder
|
||||||
import allmydata.util.log
|
import allmydata.util.log
|
||||||
|
|
||||||
from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
|
from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
|
||||||
|
@ -27,7 +27,7 @@ BASECONFIG_I = ("[client]\n"
|
||||||
"introducer.furl = %s\n"
|
"introducer.furl = %s\n"
|
||||||
)
|
)
|
||||||
|
|
||||||
class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
|
class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.TestCase):
|
||||||
def test_loadable(self):
|
def test_loadable(self):
|
||||||
basedir = "test_client.Basic.test_loadable"
|
basedir = "test_client.Basic.test_loadable"
|
||||||
os.mkdir(basedir)
|
os.mkdir(basedir)
|
||||||
|
@ -251,7 +251,7 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
|
||||||
return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
|
return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
|
||||||
|
|
||||||
def test_permute(self):
|
def test_permute(self):
|
||||||
sb = StorageFarmBroker(None, True)
|
sb = StorageFarmBroker(None, True, 0, None)
|
||||||
for k in ["%d" % i for i in range(5)]:
|
for k in ["%d" % i for i in range(5)]:
|
||||||
ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
|
ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
|
||||||
"permutation-seed-base32": base32.b2a(k) }
|
"permutation-seed-base32": base32.b2a(k) }
|
||||||
|
@ -302,76 +302,79 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
|
||||||
_check("helper.furl = None", None)
|
_check("helper.furl = None", None)
|
||||||
_check("helper.furl = pb://blah\n", "pb://blah")
|
_check("helper.furl = pb://blah\n", "pb://blah")
|
||||||
|
|
||||||
def test_create_drop_uploader(self):
|
def test_create_magic_folder_service(self):
|
||||||
class MockDropUploader(service.MultiService):
|
class MockMagicFolder(service.MultiService):
|
||||||
name = 'drop-upload'
|
name = 'magic-folder'
|
||||||
|
|
||||||
def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
|
def __init__(self, client, upload_dircap, collective_dircap, local_dir, dbfile, inotify=None,
|
||||||
|
pending_delay=1.0):
|
||||||
service.MultiService.__init__(self)
|
service.MultiService.__init__(self)
|
||||||
self.client = client
|
self.client = client
|
||||||
self.upload_dircap = upload_dircap
|
self.upload_dircap = upload_dircap
|
||||||
self.local_dir_utf8 = local_dir_utf8
|
self.collective_dircap = collective_dircap
|
||||||
|
self.local_dir = local_dir
|
||||||
|
self.dbfile = dbfile
|
||||||
self.inotify = inotify
|
self.inotify = inotify
|
||||||
|
|
||||||
self.patch(allmydata.frontends.drop_upload, 'DropUploader', MockDropUploader)
|
def ready(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
self.patch(allmydata.frontends.magic_folder, 'MagicFolder', MockMagicFolder)
|
||||||
|
|
||||||
upload_dircap = "URI:DIR2:blah"
|
upload_dircap = "URI:DIR2:blah"
|
||||||
local_dir_utf8 = u"loc\u0101l_dir".encode('utf-8')
|
local_dir_u = self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir")
|
||||||
|
local_dir_utf8 = local_dir_u.encode('utf-8')
|
||||||
config = (BASECONFIG +
|
config = (BASECONFIG +
|
||||||
"[storage]\n" +
|
"[storage]\n" +
|
||||||
"enabled = false\n" +
|
"enabled = false\n" +
|
||||||
"[drop_upload]\n" +
|
"[magic_folder]\n" +
|
||||||
"enabled = true\n")
|
"enabled = true\n")
|
||||||
|
|
||||||
basedir1 = "test_client.Basic.test_create_drop_uploader1"
|
basedir1 = "test_client.Basic.test_create_magic_folder_service1"
|
||||||
os.mkdir(basedir1)
|
os.mkdir(basedir1)
|
||||||
|
|
||||||
fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
|
fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
|
||||||
config + "local.directory = " + local_dir_utf8 + "\n")
|
config + "local.directory = " + local_dir_utf8 + "\n")
|
||||||
         self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)

         fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
-        fileutil.write(os.path.join(basedir1, "private", "drop_upload_dircap"), "URI:DIR2:blah")
+        fileutil.write(os.path.join(basedir1, "private", "magic_folder_dircap"), "URI:DIR2:blah")
+        fileutil.write(os.path.join(basedir1, "private", "collective_dircap"), "URI:DIR2:meow")
         self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)

         fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
-                       config + "upload.dircap = " + upload_dircap + "\n")
+                       config.replace("[magic_folder]\n", "[drop_upload]\n"))
         self.failUnlessRaises(OldConfigOptionError, client.Client, basedir1)

         fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                        config + "local.directory = " + local_dir_utf8 + "\n")
         c1 = client.Client(basedir1)
-        uploader = c1.getServiceNamed('drop-upload')
-        self.failUnless(isinstance(uploader, MockDropUploader), uploader)
-        self.failUnlessReallyEqual(uploader.client, c1)
-        self.failUnlessReallyEqual(uploader.upload_dircap, upload_dircap)
-        self.failUnlessReallyEqual(uploader.local_dir_utf8, local_dir_utf8)
-        self.failUnless(uploader.inotify is None, uploader.inotify)
-        self.failUnless(uploader.running)
+        magicfolder = c1.getServiceNamed('magic-folder')
+        self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder)
+        self.failUnlessReallyEqual(magicfolder.client, c1)
+        self.failUnlessReallyEqual(magicfolder.upload_dircap, upload_dircap)
+        self.failUnlessReallyEqual(os.path.basename(magicfolder.local_dir), local_dir_u)
+        self.failUnless(magicfolder.inotify is None, magicfolder.inotify)
+        self.failUnless(magicfolder.running)

         class Boom(Exception):
             pass
-        def BoomDropUploader(client, upload_dircap, local_dir_utf8, inotify=None):
+        def BoomMagicFolder(client, upload_dircap, collective_dircap, local_dir, dbfile,
+                            inotify=None, pending_delay=1.0):
             raise Boom()
+        self.patch(allmydata.frontends.magic_folder, 'MagicFolder', BoomMagicFolder)

-        logged_messages = []
-        def mock_log(*args, **kwargs):
-            logged_messages.append("%r %r" % (args, kwargs))
-        self.patch(allmydata.util.log, 'msg', mock_log)
-        self.patch(allmydata.frontends.drop_upload, 'DropUploader', BoomDropUploader)
-
-        basedir2 = "test_client.Basic.test_create_drop_uploader2"
+        basedir2 = "test_client.Basic.test_create_magic_folder_service2"
         os.mkdir(basedir2)
         os.mkdir(os.path.join(basedir2, "private"))
         fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
                        BASECONFIG +
-                       "[drop_upload]\n" +
+                       "[magic_folder]\n" +
                        "enabled = true\n" +
                        "local.directory = " + local_dir_utf8 + "\n")
-        fileutil.write(os.path.join(basedir2, "private", "drop_upload_dircap"), "URI:DIR2:blah")
-        c2 = client.Client(basedir2)
-        self.failUnlessRaises(KeyError, c2.getServiceNamed, 'drop-upload')
-        self.failUnless([True for arg in logged_messages if "Boom" in arg],
-                        logged_messages)
+        fileutil.write(os.path.join(basedir2, "private", "magic_folder_dircap"), "URI:DIR2:blah")
+        fileutil.write(os.path.join(basedir2, "private", "collective_dircap"), "URI:DIR2:meow")
+        self.failUnlessRaises(Boom, client.Client, basedir2)


 def flush_but_dont_ignore(res):

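For orientation, the configuration this test drives into the node looks roughly like the following (a sketch assembled from the literal strings above; the dircap values are the test's placeholders, not real capabilities):

    [magic_folder]
    enabled = true
    local.directory = <path of the local folder to watch>

plus two small files under the node's private/ directory: private/magic_folder_dircap (expected to be a writecap to the client's own upload directory) and private/collective_dircap (expected to be a read-only cap to the shared collective directory). The assertions above pin down the failure modes: a missing dircap file raises MissingConfigEntry, and a leftover [drop_upload] section raises OldConfigOptionError.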
@@ -1,181 +0,0 @@
-
-import os, sys
-
-from twisted.trial import unittest
-from twisted.python import filepath, runtime
-from twisted.internet import defer
-
-from allmydata.interfaces import IDirectoryNode, NoSuchChildError
-
-from allmydata.util import fake_inotify
-from allmydata.util.encodingutil import get_filesystem_encoding
-from allmydata.util.consumer import download_to_data
-from allmydata.test.no_network import GridTestMixin
-from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
-from allmydata.test.common import ShouldFailMixin
-
-from allmydata.frontends.drop_upload import DropUploader
-
-
-class DropUploadTestMixin(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, NonASCIIPathMixin):
-    """
-    These tests will be run both with a mock notifier, and (on platforms that support it)
-    with the real INotify.
-    """
-
-    def _get_count(self, name):
-        return self.stats_provider.get_stats()["counters"].get(name, 0)
-
-    def _test(self):
-        self.uploader = None
-        self.set_up_grid()
-        self.local_dir = os.path.join(self.basedir, self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir"))
-        self.mkdir_nonascii(self.local_dir)
-
-        self.client = self.g.clients[0]
-        self.stats_provider = self.client.stats_provider
-
-        d = self.client.create_dirnode()
-        def _made_upload_dir(n):
-            self.failUnless(IDirectoryNode.providedBy(n))
-            self.upload_dirnode = n
-            self.upload_dircap = n.get_uri()
-            self.uploader = DropUploader(self.client, self.upload_dircap, self.local_dir.encode('utf-8'),
-                                         inotify=self.inotify)
-            return self.uploader.startService()
-        d.addCallback(_made_upload_dir)
-
-        # Write something short enough for a LIT file.
-        d.addCallback(lambda ign: self._test_file(u"short", "test"))
-
-        # Write to the same file again with different data.
-        d.addCallback(lambda ign: self._test_file(u"short", "different"))
-
-        # Test that temporary files are not uploaded.
-        d.addCallback(lambda ign: self._test_file(u"tempfile", "test", temporary=True))
-
-        # Test that we tolerate creation of a subdirectory.
-        d.addCallback(lambda ign: os.mkdir(os.path.join(self.local_dir, u"directory")))
-
-        # Write something longer, and also try to test a Unicode name if the fs can represent it.
-        name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long")
-        d.addCallback(lambda ign: self._test_file(name_u, "test"*100))
-
-        # TODO: test that causes an upload failure.
-        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_failed'), 0))
-
-        # Prevent unclean reactor errors.
-        def _cleanup(res):
-            d = defer.succeed(None)
-            if self.uploader is not None:
-                d.addCallback(lambda ign: self.uploader.finish(for_tests=True))
-            d.addCallback(lambda ign: res)
-            return d
-        d.addBoth(_cleanup)
-        return d
-
-    def _test_file(self, name_u, data, temporary=False):
-        previously_uploaded = self._get_count('drop_upload.files_uploaded')
-        previously_disappeared = self._get_count('drop_upload.files_disappeared')
-
-        d = defer.Deferred()
-
-        # Note: this relies on the fact that we only get one IN_CLOSE_WRITE notification per file
-        # (otherwise we would get a defer.AlreadyCalledError). Should we be relying on that?
-        self.uploader.set_uploaded_callback(d.callback)
-
-        path_u = os.path.join(self.local_dir, name_u)
-        if sys.platform == "win32":
-            path = filepath.FilePath(path_u)
-        else:
-            path = filepath.FilePath(path_u.encode(get_filesystem_encoding()))
-
-        # We don't use FilePath.setContent() here because it creates a temporary file that
-        # is renamed into place, which causes events that the test is not expecting.
-        f = open(path.path, "wb")
-        try:
-            if temporary and sys.platform != "win32":
-                os.unlink(path.path)
-            f.write(data)
-        finally:
-            f.close()
-        if temporary and sys.platform == "win32":
-            os.unlink(path.path)
-        self.notify_close_write(path)
-
-        if temporary:
-            d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, 'temp file not uploaded', None,
-                                                      self.upload_dirnode.get, name_u))
-            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_disappeared'),
-                                                                 previously_disappeared + 1))
-        else:
-            d.addCallback(lambda ign: self.upload_dirnode.get(name_u))
-            d.addCallback(download_to_data)
-            d.addCallback(lambda actual_data: self.failUnlessReallyEqual(actual_data, data))
-            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_uploaded'),
-                                                                 previously_uploaded + 1))
-
-        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_queued'), 0))
-        return d
-
-
-class MockTest(DropUploadTestMixin, unittest.TestCase):
-    """This can run on any platform, and even if twisted.internet.inotify can't be imported."""
-
-    def test_errors(self):
-        self.basedir = "drop_upload.MockTest.test_errors"
-        self.set_up_grid()
-        errors_dir = os.path.join(self.basedir, "errors_dir")
-        os.mkdir(errors_dir)
-
-        client = self.g.clients[0]
-        d = client.create_dirnode()
-        def _made_upload_dir(n):
-            self.failUnless(IDirectoryNode.providedBy(n))
-            upload_dircap = n.get_uri()
-            readonly_dircap = n.get_readonly_uri()
-
-            self.shouldFail(AssertionError, 'invalid local.directory', 'could not be represented',
-                            DropUploader, client, upload_dircap, '\xFF', inotify=fake_inotify)
-            self.shouldFail(AssertionError, 'nonexistent local.directory', 'there is no directory',
-                            DropUploader, client, upload_dircap, os.path.join(self.basedir, "Laputa"), inotify=fake_inotify)
-
-            fp = filepath.FilePath(self.basedir).child('NOT_A_DIR')
-            fp.touch()
-            self.shouldFail(AssertionError, 'non-directory local.directory', 'is not a directory',
-                            DropUploader, client, upload_dircap, fp.path, inotify=fake_inotify)
-
-            self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory',
-                            DropUploader, client, 'bad', errors_dir, inotify=fake_inotify)
-            self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory',
-                            DropUploader, client, 'URI:LIT:foo', errors_dir, inotify=fake_inotify)
-            self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory',
-                            DropUploader, client, readonly_dircap, errors_dir, inotify=fake_inotify)
-        d.addCallback(_made_upload_dir)
-        return d
-
-    def test_drop_upload(self):
-        self.inotify = fake_inotify
-        self.basedir = "drop_upload.MockTest.test_drop_upload"
-        return self._test()
-
-    def notify_close_write(self, path):
-        self.uploader._notifier.event(path, self.inotify.IN_CLOSE_WRITE)
-
-
-class RealTest(DropUploadTestMixin, unittest.TestCase):
-    """This is skipped unless both Twisted and the platform support inotify."""
-
-    def test_drop_upload(self):
-        # We should always have runtime.platform.supportsINotify, because we're using
-        # Twisted >= 10.1.
-        if not runtime.platform.supportsINotify():
-            raise unittest.SkipTest("Drop-upload support can only be tested for-real on an OS that supports inotify or equivalent.")
-
-        self.inotify = None  # use the appropriate inotify for the platform
-        self.basedir = "drop_upload.RealTest.test_drop_upload"
-        return self._test()
-
-    def notify_close_write(self, path):
-        # Writing to the file causes the notification.
-        pass
@@ -116,7 +116,7 @@ class AssistedUpload(unittest.TestCase):
     timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
     def setUp(self):
         self.s = FakeClient()
-        self.s.storage_broker = StorageFarmBroker(None, True)
+        self.s.storage_broker = StorageFarmBroker(None, True, 0, None)
         self.s.secret_holder = client.SecretHolder("lease secret", "converge")
         self.s.startService()

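The one-line change above recurs in several test fixtures later in this diff: every StorageFarmBroker(None, True) or StorageFarmBroker(None, permute_peers=True) call grows two trailing positional arguments. The diff itself never names the new parameters, so only the call shape can be shown; the second argument is still permute_peers, and anything beyond that about the 0 and None is an assumption:

    # updated fixture call; the meaning of the two new positional
    # arguments (0, None) is not shown anywhere in this compare view
    self.s.storage_broker = StorageFarmBroker(None, True, 0, None)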
@@ -0,0 +1,484 @@
+
+import os, sys, stat, time
+
+from twisted.trial import unittest
+from twisted.internet import defer
+
+from allmydata.interfaces import IDirectoryNode
+
+from allmydata.util import fake_inotify, fileutil
+from allmydata.util.encodingutil import get_filesystem_encoding, to_filepath
+from allmydata.util.consumer import download_to_data
+from allmydata.test.no_network import GridTestMixin
+from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
+from allmydata.test.common import ShouldFailMixin
+from .test_cli_magic_folder import MagicFolderCLITestMixin
+
+from allmydata.frontends import magic_folder
+from allmydata.frontends.magic_folder import MagicFolder, Downloader
+from allmydata import backupdb, magicpath
+from allmydata.util.fileutil import abspath_expanduser_unicode
+
+
+class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqualMixin, NonASCIIPathMixin):
+    """
+    These tests will be run both with a mock notifier, and (on platforms that support it)
+    with the real INotify.
+    """
+
+    def setUp(self):
+        GridTestMixin.setUp(self)
+        temp = self.mktemp()
+        self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding()))
+        self.magicfolder = None
+
+    def _get_count(self, name, client=None):
+        counters = (client or self.get_client()).stats_provider.get_stats()["counters"]
+        return counters.get('magic_folder.%s' % (name,), 0)
+
+    def _createdb(self):
+        dbfile = abspath_expanduser_unicode(u"magicfolderdb.sqlite", base=self.basedir)
+        bdb = backupdb.get_backupdb(dbfile, create_version=(backupdb.SCHEMA_v3, 3))
+        self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,))
+        self.failUnlessEqual(bdb.VERSION, 3)
+        return bdb
+
+    def _restart_client(self, ign):
+        #print "_restart_client"
+        d = self.restart_client()
+        d.addCallback(self._wait_until_started)
+        return d
+
+    def _wait_until_started(self, ign):
+        #print "_wait_until_started"
+        self.magicfolder = self.get_client().getServiceNamed('magic-folder')
+        return self.magicfolder.ready()
+
+    def test_db_basic(self):
+        fileutil.make_dirs(self.basedir)
+        self._createdb()
+
+    def test_db_persistence(self):
+        """Test that a file upload creates an entry in the database."""
+
+        fileutil.make_dirs(self.basedir)
+        db = self._createdb()
+
+        path = abspath_expanduser_unicode(u"myFile1", base=self.basedir)
+        db.did_upload_file('URI:LIT:1', path, 1, 0, 0, 33)
+
+        c = db.cursor
+        c.execute("SELECT size,mtime,ctime,fileid"
+                  " FROM local_files"
+                  " WHERE path=?",
+                  (path,))
+        row = db.cursor.fetchone()
+        self.failIfEqual(row, None)
+
+        # Second test uses db.check_file instead of SQL query directly
+        # to confirm the previous upload entry in the db.
+        path = abspath_expanduser_unicode(u"myFile2", base=self.basedir)
+        fileutil.write(path, "meow\n")
+        s = os.stat(path)
+        size = s[stat.ST_SIZE]
+        ctime = s[stat.ST_CTIME]
+        mtime = s[stat.ST_MTIME]
+        db.did_upload_file('URI:LIT:2', path, 1, mtime, ctime, size)
+        r = db.check_file(path)
+        self.failUnless(r.was_uploaded())
+
+    def test_magicfolder_start_service(self):
+        self.set_up_grid()
+
+        self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
+                                                    base=self.basedir)
+        self.mkdir_nonascii(self.local_dir)
+
+        d = defer.succeed(None)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))
+
+        d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
+        d.addCallback(self._restart_client)
+
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 1))
+        d.addBoth(self.cleanup)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))
+        return d
+
+    def test_move_tree(self):
+        self.set_up_grid()
+
+        self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
+                                                    base=self.basedir)
+        self.mkdir_nonascii(self.local_dir)
+
+        empty_tree_name = self.unicode_or_fallback(u"empty_tr\u00EAe", u"empty_tree")
+        empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.basedir)
+        new_empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.local_dir)
+
+        small_tree_name = self.unicode_or_fallback(u"small_tr\u00EAe", u"empty_tree")
+        small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.basedir)
+        new_small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.local_dir)
+
+        d = self.create_invite_join_magic_folder(u"Alice", self.local_dir)
+        d.addCallback(self._restart_client)
+
+        def _check_move_empty_tree(res):
+            self.mkdir_nonascii(empty_tree_dir)
+            d2 = self.magicfolder.uploader.set_hook('processed')
+            os.rename(empty_tree_dir, new_empty_tree_dir)
+            self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_TO)
+            return d2
+        d.addCallback(_check_move_empty_tree)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 1))
+
+        def _check_move_small_tree(res):
+            self.mkdir_nonascii(small_tree_dir)
+            fileutil.write(abspath_expanduser_unicode(u"what", base=small_tree_dir), "say when")
+            d2 = self.magicfolder.uploader.set_hook('processed', ignore_count=1)
+            os.rename(small_tree_dir, new_small_tree_dir)
+            self.notify(to_filepath(new_small_tree_dir), self.inotify.IN_MOVED_TO)
+            return d2
+        d.addCallback(_check_move_small_tree)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 3))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+        def _check_moved_tree_is_watched(res):
+            d2 = self.magicfolder.uploader.set_hook('processed')
+            fileutil.write(abspath_expanduser_unicode(u"another", base=new_small_tree_dir), "file")
+            self.notify(to_filepath(abspath_expanduser_unicode(u"another", base=new_small_tree_dir)), self.inotify.IN_CLOSE_WRITE)
+            return d2
+        d.addCallback(_check_moved_tree_is_watched)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+        # Files that are moved out of the upload directory should no longer be watched.
+        def _move_dir_away(ign):
+            os.rename(new_empty_tree_dir, empty_tree_dir)
+            # Wuh? Why don't we get this event for the real test?
+            #self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_FROM)
+        d.addCallback(_move_dir_away)
+        def create_file(val):
+            test_file = abspath_expanduser_unicode(u"what", base=empty_tree_dir)
+            fileutil.write(test_file, "meow")
+            return
+        d.addCallback(create_file)
+        d.addCallback(lambda ign: time.sleep(1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+        d.addBoth(self.cleanup)
+        return d
+
+    def test_persistence(self):
+        """
+        Perform an upload of a given file and then stop the client.
+        Start a new client and magic-folder service... and verify that the file is NOT uploaded
+        a second time. This test is meant to test the database persistence along with
+        the startup and shutdown code paths of the magic-folder service.
+        """
+        self.set_up_grid()
+        self.local_dir = abspath_expanduser_unicode(u"test_persistence", base=self.basedir)
+        self.mkdir_nonascii(self.local_dir)
+        self.collective_dircap = ""
+
+        d = defer.succeed(None)
+        d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
+        d.addCallback(self._restart_client)
+
+        def create_test_file(filename):
+            d2 = self.magicfolder.uploader.set_hook('processed')
+            test_file = abspath_expanduser_unicode(filename, base=self.local_dir)
+            fileutil.write(test_file, "meow %s" % filename)
+            self.notify(to_filepath(test_file), self.inotify.IN_CLOSE_WRITE)
+            return d2
+        d.addCallback(lambda ign: create_test_file(u"what1"))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(self.cleanup)
+
+        d.addCallback(self._restart_client)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addCallback(lambda ign: create_test_file(u"what2"))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 2))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        d.addBoth(self.cleanup)
+        return d
+
+    def test_magic_folder(self):
+        self.set_up_grid()
+        self.local_dir = os.path.join(self.basedir, self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir"))
+        self.mkdir_nonascii(self.local_dir)
+
+        d = self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
+        d.addCallback(self._restart_client)
+
+        # Write something short enough for a LIT file.
+        d.addCallback(lambda ign: self._check_file(u"short", "test"))
+
+        # Write to the same file again with different data.
+        d.addCallback(lambda ign: self._check_file(u"short", "different"))
+
+        # Test that temporary files are not uploaded.
+        d.addCallback(lambda ign: self._check_file(u"tempfile", "test", temporary=True))
+
+        # Test that we tolerate creation of a subdirectory.
+        d.addCallback(lambda ign: os.mkdir(os.path.join(self.local_dir, u"directory")))
+
+        # Write something longer, and also try to test a Unicode name if the fs can represent it.
+        name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long")
+        d.addCallback(lambda ign: self._check_file(name_u, "test"*100))
+
+        # TODO: test that causes an upload failure.
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_failed'), 0))
+
+        d.addBoth(self.cleanup)
+        return d
+
+    def _check_file(self, name_u, data, temporary=False):
+        previously_uploaded = self._get_count('uploader.objects_succeeded')
+        previously_disappeared = self._get_count('uploader.objects_disappeared')
+
+        d = self.magicfolder.uploader.set_hook('processed')
+
+        path_u = abspath_expanduser_unicode(name_u, base=self.local_dir)
+        path = to_filepath(path_u)
+
+        # We don't use FilePath.setContent() here because it creates a temporary file that
+        # is renamed into place, which causes events that the test is not expecting.
+        f = open(path_u, "wb")
+        try:
+            if temporary and sys.platform != "win32":
+                os.unlink(path_u)
+            f.write(data)
+        finally:
+            f.close()
+        if temporary and sys.platform == "win32":
+            os.unlink(path_u)
+            self.notify(path, self.inotify.IN_DELETE)
+        fileutil.flush_volume(path_u)
+        self.notify(path, self.inotify.IN_CLOSE_WRITE)
+
+        if temporary:
+            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_disappeared'),
+                                                                 previously_disappeared + 1))
+        else:
+            d.addCallback(lambda ign: self.upload_dirnode.get(name_u))
+            d.addCallback(download_to_data)
+            d.addCallback(lambda actual_data: self.failUnlessReallyEqual(actual_data, data))
+            d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'),
+                                                                 previously_uploaded + 1))
+
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+        return d
+
+    def _check_version_in_dmd(self, magicfolder, relpath_u, expected_version):
+        encoded_name_u = magicpath.path2magic(relpath_u)
+        d = magicfolder.downloader._get_collective_latest_file(encoded_name_u)
+        def check_latest(result):
+            if result[0] is not None:
+                node, metadata = result
+                d.addCallback(lambda ign: self.failUnlessEqual(metadata['version'], expected_version))
+        d.addCallback(check_latest)
+        return d
+
+    def _check_version_in_local_db(self, magicfolder, relpath_u, expected_version):
+        version = magicfolder._db.get_local_file_version(relpath_u)
+        #print "_check_version_in_local_db: %r has version %s" % (relpath_u, version)
+        self.failUnlessEqual(version, expected_version)
+
+    def test_alice_bob(self):
+        d = self.setup_alice_and_bob()
+        def get_results(result):
+            # XXX are these used?
+            (self.alice_collective_dircap, self.alice_upload_dircap, self.alice_magicfolder,
+             self.bob_collective_dircap, self.bob_upload_dircap, self.bob_magicfolder) = result
+            #print "Alice magicfolderdb is at %r" % (self.alice_magicfolder._client.basedir)
+            #print "Bob magicfolderdb is at %r" % (self.bob_magicfolder._client.basedir)
+        d.addCallback(get_results)
+
+        def Alice_write_a_file(result):
+            #print "Alice writes a file\n"
+            self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
+            fileutil.write(self.file_path, "meow, meow meow. meow? meow meow! meow.")
+            self.magicfolder = self.alice_magicfolder
+            self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
+        d.addCallback(Alice_write_a_file)
+
+        def Alice_wait_for_upload(result):
+            #print "Alice waits for an upload\n"
+            d2 = self.alice_magicfolder.uploader.set_hook('processed')
+            return d2
+        d.addCallback(Alice_wait_for_upload)
+        d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 0))
+        d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 0))
+
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded', client=self.alice_magicfolder._client), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded', client=self.alice_magicfolder._client), 1))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued', client=self.alice_magicfolder._client), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created', client=self.alice_magicfolder._client), 0))
+
+        def Bob_wait_for_download(result):
+            #print "Bob waits for a download\n"
+            d2 = self.bob_magicfolder.downloader.set_hook('processed')
+            return d2
+        d.addCallback(Bob_wait_for_download)
+        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 0))
+        d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 0)) # XXX prolly not needed
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), 1))
+
+
+        # test deletion of file behavior
+        def Alice_delete_file(result):
+            #print "Alice deletes the file!\n"
+            os.unlink(self.file_path)
+            self.notify(to_filepath(self.file_path), self.inotify.IN_DELETE)
+
+            return None
+        d.addCallback(Alice_delete_file)
+        d.addCallback(Alice_wait_for_upload)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded', client=self.alice_magicfolder._client), 2))
+        d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 1))
+        d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 1))
+
+        d.addCallback(Bob_wait_for_download)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), 2))
+        d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 1))
+        d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 1))
+
+
+        def Alice_rewrite_file(result):
+            #print "Alice rewrites file\n"
+            self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
+            fileutil.write(self.file_path, "Alice suddenly sees the white rabbit running into the forest.")
+            self.magicfolder = self.alice_magicfolder
+            self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
+        d.addCallback(Alice_rewrite_file)
+
+        d.addCallback(Alice_wait_for_upload)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded', client=self.alice_magicfolder._client), 3))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded', client=self.alice_magicfolder._client), 3))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued', client=self.alice_magicfolder._client), 0))
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created', client=self.alice_magicfolder._client), 0))
+
+        d.addCallback(Bob_wait_for_download)
+        d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client), 3))
+
+        def cleanup_Alice_and_Bob(result):
+            d = defer.succeed(None)
+            d.addCallback(lambda ign: self.alice_magicfolder.finish())
+            d.addCallback(lambda ign: self.bob_magicfolder.finish())
+            d.addCallback(lambda ign: result)
+            return d
+        d.addCallback(cleanup_Alice_and_Bob)
+        return d
+
+
+class MockTest(MagicFolderTestMixin, unittest.TestCase):
+    """This can run on any platform, and even if twisted.internet.inotify can't be imported."""
+
+    def setUp(self):
+        MagicFolderTestMixin.setUp(self)
+        self.inotify = fake_inotify
+        self.patch(magic_folder, 'get_inotify_module', lambda: self.inotify)
+
+    def notify(self, path, mask):
+        self.magicfolder.uploader._notifier.event(path, mask)
+
+    def test_errors(self):
+        self.set_up_grid()
+
+        errors_dir = abspath_expanduser_unicode(u"errors_dir", base=self.basedir)
+        os.mkdir(errors_dir)
+        not_a_dir = abspath_expanduser_unicode(u"NOT_A_DIR", base=self.basedir)
+        fileutil.write(not_a_dir, "")
+        magicfolderdb = abspath_expanduser_unicode(u"magicfolderdb", base=self.basedir)
+        doesnotexist = abspath_expanduser_unicode(u"doesnotexist", base=self.basedir)
+
+        client = self.g.clients[0]
+        d = client.create_dirnode()
+        def _check_errors(n):
+            self.failUnless(IDirectoryNode.providedBy(n))
+            upload_dircap = n.get_uri()
+            readonly_dircap = n.get_readonly_uri()
+
+            self.shouldFail(AssertionError, 'nonexistent local.directory', 'there is no directory',
+                            MagicFolder, client, upload_dircap, '', doesnotexist, magicfolderdb)
+            self.shouldFail(AssertionError, 'non-directory local.directory', 'is not a directory',
+                            MagicFolder, client, upload_dircap, '', not_a_dir, magicfolderdb)
+            self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory',
+                            MagicFolder, client, 'bad', '', errors_dir, magicfolderdb)
+            self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory',
+                            MagicFolder, client, 'URI:LIT:foo', '', errors_dir, magicfolderdb)
+            self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory',
+                            MagicFolder, client, readonly_dircap, '', errors_dir, magicfolderdb,)
+            self.shouldFail(AssertionError, 'collective dircap',
+                            "The URI in 'private/collective_dircap' is not a readonly cap to a directory.",
+                            MagicFolder, client, upload_dircap, upload_dircap, errors_dir, magicfolderdb)
+
+            def _not_implemented():
+                raise NotImplementedError("blah")
+            self.patch(magic_folder, 'get_inotify_module', _not_implemented)
+            self.shouldFail(NotImplementedError, 'unsupported', 'blah',
+                            MagicFolder, client, upload_dircap, '', errors_dir, magicfolderdb)
+        d.addCallback(_check_errors)
+        return d
+
+    def test_write_downloaded_file(self):
+        workdir = u"cli/MagicFolder/write-downloaded-file"
+        local_file = fileutil.abspath_expanduser_unicode(os.path.join(workdir, "foobar"))
+
+        # create a file with name "foobar" with content "foo"
+        # write downloaded file content "bar" into "foobar" with is_conflict = False
+        fileutil.make_dirs(workdir)
+        fileutil.write(local_file, "foo")
+
+        # if is_conflict is False, then the .conflict file shouldn't exist.
+        Downloader._write_downloaded_file(local_file, "bar", False, None)
+        conflicted_path = local_file + u".conflict"
+        self.failIf(os.path.exists(conflicted_path))
+
+        # At this point, the backup file should exist with content "foo"
+        backup_path = local_file + u".backup"
+        self.failUnless(os.path.exists(backup_path))
+        self.failUnlessEqual(fileutil.read(backup_path), "foo")
+
+        # .tmp file shouldn't exist
+        self.failIf(os.path.exists(local_file + u".tmp"))
+
+        # .. and the original file should have the new content
+        self.failUnlessEqual(fileutil.read(local_file), "bar")
+
+        # now a test for conflicted case
+        Downloader._write_downloaded_file(local_file, "bar", True, None)
+        self.failUnless(os.path.exists(conflicted_path))
+
+        # .tmp file shouldn't exist
+        self.failIf(os.path.exists(local_file + u".tmp"))
+
+
+class RealTest(MagicFolderTestMixin, unittest.TestCase):
+    """This is skipped unless both Twisted and the platform support inotify."""
+
+    def setUp(self):
+        MagicFolderTestMixin.setUp(self)
+        self.inotify = magic_folder.get_inotify_module()
+
+    def notify(self, path, mask):
+        # Writing to the filesystem causes the notification.
+        pass
+
+try:
+    magic_folder.get_inotify_module()
+except NotImplementedError:
+    RealTest.skip = "Magic Folder support can only be tested for-real on an OS that supports inotify or equivalent."
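One idiom recurs throughout the mixin above: arm the uploader's (or downloader's) 'processed' hook before provoking a filesystem event, then wait on the Deferred it returns. A minimal sketch of that pattern, using the same helpers the tests use (_write_and_wait is a hypothetical name, not part of the diff):

    def _write_and_wait(self, name_u, data):
        d = self.magicfolder.uploader.set_hook('processed')   # arm the hook first
        path_u = abspath_expanduser_unicode(name_u, base=self.local_dir)
        fileutil.write(path_u, data)                          # then cause the event
        self.notify(to_filepath(path_u), self.inotify.IN_CLOSE_WRITE)
        return d                                              # fires once the item is processed

Arming the hook before triggering the event avoids a race in which the notification is processed before the test starts waiting.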
@@ -0,0 +1,28 @@
+
+from twisted.trial import unittest
+
+from allmydata import magicpath
+
+
+class MagicPath(unittest.TestCase):
+    tests = {
+        u"Documents/work/critical-project/qed.txt": u"Documents@_work@_critical-project@_qed.txt",
+        u"Documents/emails/bunnyfufu@hoppingforest.net": u"Documents@_emails@_bunnyfufu@@hoppingforest.net",
+        u"foo/@/bar": u"foo@_@@@_bar",
+    }
+
+    def test_path2magic(self):
+        for test, expected in self.tests.items():
+            self.failUnlessEqual(magicpath.path2magic(test), expected)
+
+    def test_magic2path(self):
+        for expected, test in self.tests.items():
+            self.failUnlessEqual(magicpath.magic2path(test), expected)
+
+    def test_should_ignore(self):
+        self.failUnlessEqual(magicpath.should_ignore_file(u".bashrc"), True)
+        self.failUnlessEqual(magicpath.should_ignore_file(u"bashrc."), False)
+        self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/.bashrc"), True)
+        self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/.branch/bashrc"), True)
+        self.failUnlessEqual(magicpath.should_ignore_file(u"forest/.tree/branch/bashrc"), True)
+        self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/bashrc"), False)
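The test vectors imply the encoding rule: path2magic flattens a relative path into a single name by escaping each '@' as '@@' and each '/' as '@_', and magic2path inverts the mapping exactly. Reading one vector from the table above:

    magicpath.path2magic(u"foo/@/bar")     # => u"foo@_@@@_bar"
    magicpath.magic2path(u"foo@_@@@_bar")  # => u"foo/@/bar"

Escaping '@' is what keeps the mapping unambiguous: a literal '@' in a filename can never be confused with the '@_' that stands for a path separator.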
@@ -234,7 +234,7 @@ def make_storagebroker(s=None, num_peers=10):
         s = FakeStorage()
     peerids = [tagged_hash("peerid", "%d" % i)[:20]
                for i in range(num_peers)]
-    storage_broker = StorageFarmBroker(None, True)
+    storage_broker = StorageFarmBroker(None, True, 0, None)
     for peerid in peerids:
         fss = FakeStorageServer(peerid, s)
         ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),

@@ -198,7 +198,7 @@ class FakeClient:
         mode = dict([i,mode] for i in range(num_servers))
         servers = [ ("%20d"%fakeid, FakeStorageServer(mode[fakeid]))
                     for fakeid in range(self.num_servers) ]
-        self.storage_broker = StorageFarmBroker(None, permute_peers=True)
+        self.storage_broker = StorageFarmBroker(None, True, 0, None)
         for (serverid, rref) in servers:
             ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(serverid),
                    "permutation-seed-base32": base32.b2a(serverid) }

@@ -441,6 +441,74 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
         self.failIf(os.path.exists(fn))
         self.failUnless(os.path.exists(fn2))

+    def test_rename_no_overwrite(self):
+        workdir = fileutil.abspath_expanduser_unicode(u"test_rename_no_overwrite")
+        fileutil.make_dirs(workdir)
+
+        source_path = os.path.join(workdir, "source")
+        dest_path = os.path.join(workdir, "dest")
+
+        # when neither file exists
+        self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
+
+        # when only dest exists
+        fileutil.write(dest_path, "dest")
+        self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
+        self.failUnlessEqual(fileutil.read(dest_path), "dest")
+
+        # when both exist
+        fileutil.write(source_path, "source")
+        self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
+        self.failUnlessEqual(fileutil.read(source_path), "source")
+        self.failUnlessEqual(fileutil.read(dest_path), "dest")
+
+        # when only source exists
+        os.remove(dest_path)
+        fileutil.rename_no_overwrite(source_path, dest_path)
+        self.failUnlessEqual(fileutil.read(dest_path), "source")
+        self.failIf(os.path.exists(source_path))
+
+    def test_replace_file(self):
+        workdir = fileutil.abspath_expanduser_unicode(u"test_replace_file")
+        fileutil.make_dirs(workdir)
+
+        backup_path = os.path.join(workdir, "backup")
+        replaced_path = os.path.join(workdir, "replaced")
+        replacement_path = os.path.join(workdir, "replacement")
+
+        # when none of the files exist
+        self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path, backup_path)
+
+        # when only replaced exists
+        fileutil.write(replaced_path, "foo")
+        self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path, backup_path)
+        self.failUnlessEqual(fileutil.read(replaced_path), "foo")
+
+        # when both replaced and replacement exist, but not backup
+        fileutil.write(replacement_path, "bar")
+        fileutil.replace_file(replaced_path, replacement_path, backup_path)
+        self.failUnlessEqual(fileutil.read(backup_path), "foo")
+        self.failUnlessEqual(fileutil.read(replaced_path), "bar")
+        self.failIf(os.path.exists(replacement_path))
+
+        # when only replacement exists
+        os.remove(backup_path)
+        os.remove(replaced_path)
+        fileutil.write(replacement_path, "bar")
+        fileutil.replace_file(replaced_path, replacement_path, backup_path)
+        self.failUnlessEqual(fileutil.read(replaced_path), "bar")
+        self.failIf(os.path.exists(replacement_path))
+        self.failIf(os.path.exists(backup_path))
+
+        # when replaced, replacement and backup all exist
+        fileutil.write(replaced_path, "foo")
+        fileutil.write(replacement_path, "bar")
+        fileutil.write(backup_path, "bak")
+        fileutil.replace_file(replaced_path, replacement_path, backup_path)
+        self.failUnlessEqual(fileutil.read(backup_path), "foo")
+        self.failUnlessEqual(fileutil.read(replaced_path), "bar")
+        self.failIf(os.path.exists(replacement_path))
+
     def test_du(self):
         basedir = "util/FileUtil/test_du"
         fileutil.make_dirs(basedir)
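Read together, these assertions pin down rename_no_overwrite's contract: it raises OSError unless the source exists and the destination does not, and on success the source disappears. A small usage sketch under those semantics (the path names are illustrative):

    # move a freshly written file into place, refusing to clobber an existing one
    fileutil.write(tmp_path, data)
    try:
        fileutil.rename_no_overwrite(tmp_path, final_path)
    except OSError:
        # final_path already existed (or tmp_path vanished); treat as a conflict
        raise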
@@ -567,6 +635,38 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
         disk = fileutil.get_disk_stats('.', 2**128)
         self.failUnlessEqual(disk['avail'], 0)

+    def test_get_pathinfo(self):
+        basedir = "util/FileUtil/test_get_pathinfo"
+        fileutil.make_dirs(basedir)
+
+        # create a directory
+        self.mkdir(basedir, "a")
+        dirinfo = fileutil.get_pathinfo(basedir)
+        self.failUnlessTrue(dirinfo.isdir)
+        self.failUnlessTrue(dirinfo.exists)
+        self.failUnlessFalse(dirinfo.isfile)
+        self.failUnlessFalse(dirinfo.islink)
+
+        # create a file under the directory
+        f = os.path.join(basedir, "a/1.txt")
+        self.touch(basedir, "a/1.txt", data="a"*10)
+        fileinfo = fileutil.get_pathinfo(f)
+        self.failUnlessTrue(fileinfo.isfile)
+        self.failUnlessTrue(fileinfo.exists)
+        self.failUnlessFalse(fileinfo.isdir)
+        self.failUnlessFalse(fileinfo.islink)
+        self.failUnlessEqual(fileinfo.size, 10)
+
+        # create a symlink under the directory a pointing to 1.txt
+        slname = os.path.join(basedir, "a/linkto1.txt")
+        os.symlink(f, slname)
+        symlinkinfo = fileutil.get_pathinfo(slname)
+        self.failUnlessTrue(symlinkinfo.islink)
+        self.failUnlessTrue(symlinkinfo.exists)
+        self.failUnlessFalse(symlinkinfo.isfile)
+        self.failUnlessFalse(symlinkinfo.isdir)
+
+
 class PollMixinTests(unittest.TestCase):
     def setUp(self):
         self.pm = pollmixin.PollMixin()
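These assertions also document the shape of the value get_pathinfo returns (its implementation appears further down in this diff): a named tuple with isdir/isfile/islink/exists flags plus size, ctime and mtime, where a symlink is reported as a link rather than followed, since the implementation uses lstat. A usage sketch:

    info = fileutil.get_pathinfo(path_u)
    if not info.exists:
        pass        # missing path: the other fields are False/None
    elif info.isfile:
        print info.size, info.mtime   # ordinary stat fields for regular files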
@@ -239,7 +239,7 @@ class FakeClient(Client):
         self._secret_holder = SecretHolder("lease secret", "convergence secret")
         self.helper = None
         self.convergence = "some random string"
-        self.storage_broker = StorageFarmBroker(None, permute_peers=True)
+        self.storage_broker = StorageFarmBroker(None, True, 0, None)
         # fake knowledge of another server
         self.storage_broker.test_add_server("other_nodeid",
             FakeDisplayableServer("other_nodeid", u"other_nickname \u263B"))

@@ -5,6 +5,7 @@ from foolscap.api import eventually, fireEventually
 from twisted.internet import defer, reactor

 from allmydata.util import log
+from allmydata.util.assertutil import _assert
 from allmydata.util.pollmixin import PollMixin


@@ -77,11 +78,13 @@ class HookMixin:
     I am a helper mixin that maintains a collection of named hooks, primarily
     for use in tests. Each hook is set to an unfired Deferred using 'set_hook',
     and can then be fired exactly once at the appropriate time by '_call_hook'.
+    If 'ignore_count' is given, that number of calls to '_call_hook' will be
+    ignored before firing the hook.

     I assume a '_hooks' attribute that should set by the class constructor to
     a dict mapping each valid hook name to None.
     """
-    def set_hook(self, name, d=None):
+    def set_hook(self, name, d=None, ignore_count=0):
         """
         Called by the hook observer (e.g. by a test).
         If d is not given, an unfired Deferred is created and returned.
@@ -89,16 +92,20 @@ class HookMixin:
         """
         if d is None:
             d = defer.Deferred()
-        assert self._hooks[name] is None, self._hooks[name]
-        assert isinstance(d, defer.Deferred), d
-        self._hooks[name] = d
+        _assert(ignore_count >= 0, ignore_count=ignore_count)
+        _assert(name in self._hooks, name=name)
+        _assert(self._hooks[name] is None, name=name, hook=self._hooks[name])
+        _assert(isinstance(d, defer.Deferred), d=d)
+
+        self._hooks[name] = (d, ignore_count)
         return d

     def _call_hook(self, res, name):
         """
-        Called to trigger the hook, with argument 'res'. This is a no-op if the
-        hook is unset. Otherwise, the hook will be unset, and then its Deferred
-        will be fired synchronously.
+        Called to trigger the hook, with argument 'res'. This is a no-op if
+        the hook is unset. If the hook's ignore_count is positive, it will be
+        decremented; if it was already zero, the hook will be unset, and then
+        its Deferred will be fired synchronously.

         The expected usage is "deferred.addBoth(self._call_hook, 'hookname')".
         This ensures that if 'res' is a failure, the hook will be errbacked,
@@ -106,9 +113,15 @@ class HookMixin:
         'res' is returned so that the current result or failure will be passed
         through.
         """
-        d = self._hooks[name]
-        if d is None:
+        hook = self._hooks[name]
+        if hook is None:
             return defer.succeed(None)
+
+        (d, ignore_count) = hook
+        log.msg("call_hook", name=name, ignore_count=ignore_count, level=log.NOISY)
+        if ignore_count > 0:
+            self._hooks[name] = (d, ignore_count - 1)
+        else:
             self._hooks[name] = None
             _with_log(d.callback, res)
         return res

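The ignore_count extension is what lets a test wait for the Nth event rather than the first; _check_move_small_tree in the magic-folder tests above relies on it. A sketch of the intended usage:

    # The first 'processed' call is swallowed (ignore_count is decremented);
    # d fires with the result of the second call.
    d = self.magicfolder.uploader.set_hook('processed', ignore_count=1)

Because _call_hook returns res unchanged, the hook can be spliced into an existing Deferred chain with addBoth without disturbing the result or failure flowing through it.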
@@ -8,6 +8,7 @@ from types import NoneType

 from allmydata.util.assertutil import precondition
 from twisted.python import usage
+from twisted.python.filepath import FilePath
 from allmydata.util import log
 from allmydata.util.fileutil import abspath_expanduser_unicode

@@ -35,9 +36,10 @@ def check_encoding(encoding):
 filesystem_encoding = None
 io_encoding = None
 is_unicode_platform = False
+use_unicode_filepath = False

 def _reload():
-    global filesystem_encoding, io_encoding, is_unicode_platform
+    global filesystem_encoding, io_encoding, is_unicode_platform, use_unicode_filepath

     filesystem_encoding = canonical_encoding(sys.getfilesystemencoding())
     check_encoding(filesystem_encoding)
@@ -61,6 +63,12 @@ def _reload():

     is_unicode_platform = sys.platform in ["win32", "darwin"]

+    # Despite the Unicode-mode FilePath support added to Twisted in
+    # <https://twistedmatrix.com/trac/ticket/7805>, we can't yet use
+    # Unicode-mode FilePaths with INotify on non-Windows platforms
+    # due to <https://twistedmatrix.com/trac/ticket/7928>.
+    use_unicode_filepath = sys.platform == "win32"
+
 _reload()


@@ -245,6 +253,23 @@ def quote_local_unicode_path(path, quotemarks=True):

     return quote_output(path, quotemarks=quotemarks, quote_newlines=True)

+def to_filepath(path):
+    precondition(isinstance(path, basestring), path=path)
+
+    if isinstance(path, unicode) and not use_unicode_filepath:
+        path = path.encode(filesystem_encoding)
+
+    return FilePath(path)
+
+def unicode_from_filepath(fp):
+    precondition(isinstance(fp, FilePath), fp=fp)
+
+    path = fp.path
+    if isinstance(path, bytes):
+        path = path.decode(filesystem_encoding)
+
+    return path
+

 def unicode_platform():
     """
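to_filepath and unicode_from_filepath give the rest of the code a single conversion point between text paths and Twisted FilePath objects: on Windows the FilePath stays Unicode, while elsewhere the path is first encoded to the filesystem encoding, for the Twisted #7928 reason noted in the comment above. A round-trip sketch (the path literal is illustrative):

    fp = to_filepath(u"/home/alice/magic")        # bytes-mode FilePath on POSIX
    assert unicode_from_filepath(fp) == u"/home/alice/magic"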
@ -3,6 +3,8 @@ Futz with files like a pro.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import sys, exceptions, os, stat, tempfile, time, binascii
|
import sys, exceptions, os, stat, tempfile, time, binascii
|
||||||
|
from collections import namedtuple
|
||||||
|
from errno import ENOENT
|
||||||
|
|
||||||
from twisted.python import log
|
from twisted.python import log
|
||||||
|
|
||||||
|
@ -515,3 +517,142 @@ def get_available_space(whichdir, reserved_space):
|
||||||
except EnvironmentError:
|
except EnvironmentError:
|
||||||
log.msg("OS call to get disk statistics failed")
|
log.msg("OS call to get disk statistics failed")
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
if sys.platform == "win32":
|
||||||
|
from ctypes.wintypes import BOOL, HANDLE, DWORD, LPCWSTR, LPVOID, WinError, get_last_error
|
||||||
|
|
||||||
|
# <http://msdn.microsoft.com/en-us/library/aa363858%28v=vs.85%29.aspx>
|
||||||
|
CreateFileW = WINFUNCTYPE(HANDLE, LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE) \
|
||||||
|
(("CreateFileW", windll.kernel32))
|
||||||
|
|
||||||
|
GENERIC_WRITE = 0x40000000
|
||||||
|
FILE_SHARE_READ = 0x00000001
|
||||||
|
FILE_SHARE_WRITE = 0x00000002
|
||||||
|
OPEN_EXISTING = 3
|
||||||
|
INVALID_HANDLE_VALUE = 0xFFFFFFFF
|
||||||
|
|
||||||
|
# <http://msdn.microsoft.com/en-us/library/aa364439%28v=vs.85%29.aspx>
|
||||||
|
FlushFileBuffers = WINFUNCTYPE(BOOL, HANDLE)(("FlushFileBuffers", windll.kernel32))
|
||||||
|
|
||||||
|
# <http://msdn.microsoft.com/en-us/library/ms724211%28v=vs.85%29.aspx>
|
||||||
|
CloseHandle = WINFUNCTYPE(BOOL, HANDLE)(("CloseHandle", windll.kernel32))
|
||||||
|
|
||||||
|
# <http://social.msdn.microsoft.com/forums/en-US/netfxbcl/thread/4465cafb-f4ed-434f-89d8-c85ced6ffaa8/>
|
||||||
|
def flush_volume(path):
|
||||||
|
drive = os.path.splitdrive(os.path.realpath(path))[0]
|
||||||
|
|
||||||
|
hVolume = CreateFileW(u"\\\\.\\" + drive,
|
||||||
|
GENERIC_WRITE,
|
||||||
|
FILE_SHARE_READ | FILE_SHARE_WRITE,
|
||||||
|
None,
|
||||||
|
OPEN_EXISTING,
|
||||||
|
0,
|
||||||
|
None
|
||||||
|
)
|
||||||
|
if hVolume == INVALID_HANDLE_VALUE:
|
||||||
|
raise WinError()
|
||||||
|
|
||||||
|
if FlushFileBuffers(hVolume) == 0:
|
||||||
|
raise WinError()
|
||||||
|
|
||||||
|
CloseHandle(hVolume)
|
||||||
|
else:
|
||||||
|
def flush_volume(path):
|
||||||
|
# use sync()?
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ConflictError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class UnableToUnlinkReplacementError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def reraise(wrapper):
|
||||||
|
_, exc, tb = sys.exc_info()
|
||||||
|
wrapper_exc = wrapper("%s: %s" % (exc.__class__.__name__, exc))
|
||||||
|
raise wrapper_exc.__class__, wrapper_exc, tb
|
||||||
|
|
||||||
|
if sys.platform == "win32":
    from ctypes import WINFUNCTYPE, windll, WinError, get_last_error
    from ctypes.wintypes import BOOL, DWORD, LPCWSTR, LPVOID

    # <https://msdn.microsoft.com/en-us/library/windows/desktop/aa365512%28v=vs.85%29.aspx>
    ReplaceFileW = WINFUNCTYPE(
        BOOL,
        LPCWSTR, LPCWSTR, LPCWSTR, DWORD, LPVOID, LPVOID,
        use_last_error=True
    )(("ReplaceFileW", windll.kernel32))

    REPLACEFILE_IGNORE_MERGE_ERRORS = 0x00000002

    def rename_no_overwrite(source_path, dest_path):
        os.rename(source_path, dest_path)

    def replace_file(replaced_path, replacement_path, backup_path):
        precondition_abspath(replaced_path)
        precondition_abspath(replacement_path)
        precondition_abspath(backup_path)

        r = ReplaceFileW(replaced_path, replacement_path, backup_path,
                         REPLACEFILE_IGNORE_MERGE_ERRORS, None, None)
        if r == 0:
            # The UnableToUnlinkReplacementError case does not happen on Windows;
            # all errors should be treated as signalling a conflict.
            err = get_last_error()
            raise ConflictError("WinError: %s" % (WinError(err),))
else:
    def rename_no_overwrite(source_path, dest_path):
        # link will fail with EEXIST if there is already something at dest_path.
        os.link(source_path, dest_path)
        try:
            os.unlink(source_path)
        except EnvironmentError:
            reraise(UnableToUnlinkReplacementError)

    def replace_file(replaced_path, replacement_path, backup_path):
        precondition_abspath(replaced_path)
        precondition_abspath(replacement_path)
        precondition_abspath(backup_path)

        if not os.path.exists(replacement_path):
            raise ConflictError("Replacement file not found: %r" % (replacement_path,))

        try:
            os.rename(replaced_path, backup_path)
        except OSError as e:
            if e.errno != ENOENT:
                raise
        try:
            rename_no_overwrite(replacement_path, replaced_path)
        except EnvironmentError:
            reraise(ConflictError)

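A hedged example of how these primitives compose (the function name is hypothetical, not from the diff): replace_file swaps a replacement into place, keeping the old copy at backup_path, and surfaces any interference as ConflictError. All three paths must be absolute.

import os
from allmydata.util import fileutil  # assuming the module above

def publish(local_path, tmp_path):
    # replace_file requires absolute paths for all three arguments
    fileutil.replace_file(os.path.abspath(local_path),
                          os.path.abspath(tmp_path),
                          os.path.abspath(local_path) + ".backup")
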
PathInfo = namedtuple('PathInfo', 'isdir isfile islink exists size ctime mtime')

def get_pathinfo(path_u):
    try:
        statinfo = os.lstat(path_u)
        mode = statinfo.st_mode
        return PathInfo(isdir =stat.S_ISDIR(mode),
                        isfile=stat.S_ISREG(mode),
                        islink=stat.S_ISLNK(mode),
                        exists=True,
                        size  =statinfo.st_size,
                        ctime =statinfo.st_ctime,
                        mtime =statinfo.st_mtime,
                       )
    except OSError as e:
        if e.errno == ENOENT:
            return PathInfo(isdir =False,
                            isfile=False,
                            islink=False,
                            exists=False,
                            size  =None,
                            ctime =None,
                            mtime =None,
                           )
        raise
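
A small usage sketch (hypothetical path, assuming this module is imported): get_pathinfo does not raise for a missing path; it returns a PathInfo with exists=False, so callers can branch without try/except.

info = get_pathinfo(u"/tmp/does-not-exist")   # hypothetical path
if not info.exists:
    print "missing"
elif info.isfile:
    print "regular file, %d bytes, mtime %s" % (info.size, info.mtime)
elif info.isdir:
    print "directory"
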
@@ -0,0 +1,274 @@

# Windows near-equivalent to twisted.internet.inotify
# This should only be imported on Windows.

import os, sys

from twisted.internet import reactor
from twisted.internet.threads import deferToThread

from allmydata.util.fake_inotify import humanReadableMask, \
    IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \
    IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \
    IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \
    IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED

# this throwaway list only references the imported names, so that pyflakes
# does not flag these re-exported constants as unused imports
[humanReadableMask, \
    IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \
    IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \
    IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \
    IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED]

from allmydata.util.assertutil import _assert, precondition
from allmydata.util.encodingutil import quote_output
from allmydata.util import log, fileutil
from allmydata.util.pollmixin import PollMixin

from ctypes import WINFUNCTYPE, WinError, windll, POINTER, byref, create_string_buffer, addressof
from ctypes.wintypes import BOOL, HANDLE, DWORD, LPCWSTR, LPVOID

# <http://msdn.microsoft.com/en-us/library/gg258116%28v=vs.85%29.aspx>
FILE_LIST_DIRECTORY = 1

# <http://msdn.microsoft.com/en-us/library/aa363858%28v=vs.85%29.aspx>
CreateFileW = WINFUNCTYPE(HANDLE, LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE) \
                  (("CreateFileW", windll.kernel32))

FILE_SHARE_READ   = 0x00000001
FILE_SHARE_WRITE  = 0x00000002
FILE_SHARE_DELETE = 0x00000004

OPEN_EXISTING = 3

FILE_FLAG_BACKUP_SEMANTICS = 0x02000000

# <http://msdn.microsoft.com/en-us/library/ms724211%28v=vs.85%29.aspx>
CloseHandle = WINFUNCTYPE(BOOL, HANDLE)(("CloseHandle", windll.kernel32))

# <http://msdn.microsoft.com/en-us/library/aa365465%28v=vs.85%29.aspx>
ReadDirectoryChangesW = WINFUNCTYPE(BOOL, HANDLE, LPVOID, DWORD, BOOL, DWORD, POINTER(DWORD), LPVOID, LPVOID) \
                            (("ReadDirectoryChangesW", windll.kernel32))

FILE_NOTIFY_CHANGE_FILE_NAME   = 0x00000001
FILE_NOTIFY_CHANGE_DIR_NAME    = 0x00000002
FILE_NOTIFY_CHANGE_ATTRIBUTES  = 0x00000004
#FILE_NOTIFY_CHANGE_SIZE       = 0x00000008
FILE_NOTIFY_CHANGE_LAST_WRITE  = 0x00000010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x00000020
#FILE_NOTIFY_CHANGE_CREATION   = 0x00000040
FILE_NOTIFY_CHANGE_SECURITY    = 0x00000100

# <http://msdn.microsoft.com/en-us/library/aa364391%28v=vs.85%29.aspx>
FILE_ACTION_ADDED            = 0x00000001
FILE_ACTION_REMOVED          = 0x00000002
FILE_ACTION_MODIFIED         = 0x00000003
FILE_ACTION_RENAMED_OLD_NAME = 0x00000004
FILE_ACTION_RENAMED_NEW_NAME = 0x00000005

_action_to_string = {
    FILE_ACTION_ADDED            : "FILE_ACTION_ADDED",
    FILE_ACTION_REMOVED          : "FILE_ACTION_REMOVED",
    FILE_ACTION_MODIFIED         : "FILE_ACTION_MODIFIED",
    FILE_ACTION_RENAMED_OLD_NAME : "FILE_ACTION_RENAMED_OLD_NAME",
    FILE_ACTION_RENAMED_NEW_NAME : "FILE_ACTION_RENAMED_NEW_NAME",
}

_action_to_inotify_mask = {
    FILE_ACTION_ADDED            : IN_CREATE,
    FILE_ACTION_REMOVED          : IN_DELETE,
    FILE_ACTION_MODIFIED         : IN_CHANGED,
    FILE_ACTION_RENAMED_OLD_NAME : IN_MOVED_FROM,
    FILE_ACTION_RENAMED_NEW_NAME : IN_MOVED_TO,
}

INVALID_HANDLE_VALUE = 0xFFFFFFFF


class Event(object):
    """
    * action:   a FILE_ACTION_* constant (not a bit mask)
    * filename: a Unicode string, giving the name relative to the watched directory
    """
    def __init__(self, action, filename):
        self.action = action
        self.filename = filename

    def __repr__(self):
        return "Event(%r, %r)" % (_action_to_string.get(self.action, self.action), self.filename)


class FileNotifyInformation(object):
    """
    I represent a buffer containing FILE_NOTIFY_INFORMATION structures, and can
    iterate over those structures, decoding them into Event objects.
    """

    def __init__(self, size=1024):
        self.size = size
        self.buffer = create_string_buffer(size)
        address = addressof(self.buffer)
        _assert(address & 3 == 0, "address 0x%X returned by create_string_buffer is not DWORD-aligned" % (address,))
        self.data = None

    def read_changes(self, hDirectory, recursive, filter):
        bytes_returned = DWORD(0)
        r = ReadDirectoryChangesW(hDirectory,
                                  self.buffer,
                                  self.size,
                                  recursive,
                                  filter,
                                  byref(bytes_returned),
                                  None,  # NULL -> no overlapped I/O
                                  None   # NULL -> no completion routine
                                 )
        if r == 0:
            raise WinError()
        self.data = self.buffer.raw[:bytes_returned.value]

    def __iter__(self):
        # Iterator implemented as generator: <http://docs.python.org/library/stdtypes.html#generator-types>
        # Each record is laid out as:
        #   DWORD NextEntryOffset; DWORD Action; DWORD FileNameLength; WCHAR FileName[];
        pos = 0
        while True:
            bytes = self._read_dword(pos+8)
            s = Event(self._read_dword(pos+4),
                      self.data[pos+12 : pos+12+bytes].decode('utf-16-le'))

            next_entry_offset = self._read_dword(pos)
            yield s
            if next_entry_offset == 0:
                break
            pos = pos + next_entry_offset

    def _read_dword(self, i):
        # little-endian
        return ( ord(self.data[i])          |
                (ord(self.data[i+1]) <<  8) |
                (ord(self.data[i+2]) << 16) |
                (ord(self.data[i+3]) << 24))

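A worked example of that record layout (not part of the diff; it exercises only the parsing path by filling in data directly, so the Windows calls are never made): a single record for FILE_ACTION_ADDED of the file u"a" decodes as follows.

import struct

fni = FileNotifyInformation()
# NextEntryOffset=0 (last record), Action=FILE_ACTION_ADDED,
# FileNameLength=2 (bytes, not characters), then the UTF-16-LE file name
fni.data = struct.pack("<3I", 0, FILE_ACTION_ADDED, 2) + u"a".encode('utf-16-le')
print list(fni)   # -> [Event('FILE_ACTION_ADDED', u'a')]
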
def _open_directory(path_u):
    hDirectory = CreateFileW(path_u,
                             FILE_LIST_DIRECTORY,         # access rights
                             FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                                                          # don't prevent other processes from accessing
                             None,                        # no security descriptor
                             OPEN_EXISTING,               # directory must already exist
                             FILE_FLAG_BACKUP_SEMANTICS,  # necessary to open a directory
                             None                         # no template file
                            )
    if hDirectory == INVALID_HANDLE_VALUE:
        e = WinError()
        raise OSError("Opening directory %s gave Windows error %r: %s" % (quote_output(path_u), e.args[0], e.args[1]))
    return hDirectory

def simple_test():
    path_u = u"test"
    filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE
    recursive = False

    hDirectory = _open_directory(path_u)
    fni = FileNotifyInformation()
    print "Waiting..."
    while True:
        fni.read_changes(hDirectory, recursive, filter)
        print repr(fni.data)
        for info in fni:
            print info

NOT_STARTED = "NOT_STARTED"
STARTED     = "STARTED"
STOPPING    = "STOPPING"
STOPPED     = "STOPPED"

class INotify(PollMixin):
    def __init__(self):
        self._state = NOT_STARTED
        self._filter = None
        self._callbacks = None
        self._hDirectory = None
        self._path = None
        self._pending = set()
        self._pending_delay = 1.0

    def set_pending_delay(self, delay):
        self._pending_delay = delay

    def startReading(self):
        deferToThread(self._thread)
        return self.poll(lambda: self._state != NOT_STARTED)

    def stopReading(self):
        # FIXME race conditions
        if self._state != STOPPED:
            self._state = STOPPING

    def wait_until_stopped(self):
        # Writing a dummy file causes a change notification, so that the
        # blocked read_changes() call returns and _thread can notice STOPPING.
        fileutil.write(os.path.join(self._path.path, u".ignore-me"), "")
        return self.poll(lambda: self._state == STOPPED)

    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state)
        precondition(self._filter is None, "only one watch is supported")
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)

        self._path = path
        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode(sys.getfilesystemencoding())
            _assert(isinstance(path_u, unicode), path_u=path_u)

        self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE

        if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
            self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
        if mask & IN_ATTRIB:
            self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY

        self._recursive = recursive
        self._callbacks = callbacks or []
        self._hDirectory = _open_directory(path_u)

    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                fni.read_changes(self._hDirectory, self._recursive, self._filter)
                for info in fni:
                    if self._state == STOPPING:
                        hDirectory = self._hDirectory
                        self._callbacks = None
                        self._hDirectory = None
                        CloseHandle(hDirectory)
                        self._state = STOPPED
                        return

                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    def _maybe_notify(path):
                        if path not in self._pending:
                            self._pending.add(path)
                            def _do_callbacks():
                                self._pending.remove(path)
                                for cb in self._callbacks:
                                    try:
                                        cb(None, path, IN_CHANGED)
                                    except Exception, e:
                                        log.err(e)
                            reactor.callLater(self._pending_delay, _do_callbacks)
                    reactor.callFromThread(_maybe_notify, path)
        except Exception, e:
            log.err(e)
            self._state = STOPPED
            raise
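
A hedged driver sketch (the directory and callback are hypothetical, and a running Twisted reactor is assumed): this mirrors how twisted.internet.inotify is normally driven.

from twisted.python.filepath import FilePath

def on_change(ignored, path, mask):
    # called via reactor.callLater after the pending delay
    print "changed:", path.path

notifier = INotify()
notifier.watch(FilePath(u"C:\\magicfolder"), callbacks=[on_change])
d = notifier.startReading()   # Deferred fires once the watcher thread has started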