Teach uploader+downloader to use the db schema
Here we attempt to fix all the unit tests as well; however, two tests still fail.
This commit is contained in:
parent
19c4681b85
commit
a404e7104c
|
@ -280,7 +280,9 @@ class Uploader(QueueMixin):
|
|||
|
||||
d = defer.succeed(None)
|
||||
|
||||
def _maybe_upload(val):
|
||||
def _maybe_upload(val, now=None):
|
||||
if now is None:
|
||||
now = time.time()
|
||||
fp = self._get_filepath(relpath_u)
|
||||
pathinfo = get_pathinfo(unicode_from_filepath(fp))
|
||||
|
||||
|
@ -293,19 +295,22 @@ class Uploader(QueueMixin):
|
|||
self._count('objects_disappeared')
|
||||
d2 = defer.succeed(None)
|
||||
if self._db.check_file_db_exists(relpath_u):
|
||||
last_downloaded_timestamp = now
|
||||
d2.addCallback(lambda ign: self._get_metadata(encoded_path_u))
|
||||
last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
|
||||
current_version = self._db.get_local_file_version(relpath_u) + 1
|
||||
new_metadata = {}
|
||||
def set_deleted(metadata):
|
||||
metadata['last_downloaded_uri'] = last_downloaded_uri
|
||||
metadata['version'] = current_version
|
||||
metadata['deleted'] = True
|
||||
last_downloaded_uri = metadata.get('last_downloaded_uri', None)
|
||||
new_metadata['last_downloaded_uri'] = last_downloaded_uri # XXX this has got to be wrong
|
||||
new_metadata['version'] = current_version
|
||||
new_metadata['deleted'] = True
|
||||
empty_uploadable = Data("", self._client.convergence)
|
||||
return self._upload_dirnode.add_file(encoded_path_u, empty_uploadable, overwrite=True, metadata=metadata)
|
||||
d2.addCallback(set_deleted)
|
||||
def add_db_entry(filenode):
|
||||
filecap = filenode.get_uri()
|
||||
self._db.did_upload_version(filecap, relpath_u, current_version, pathinfo)
|
||||
|
||||
self._db.did_upload_version(relpath_u, current_version, filecap, last_downloaded_uri, last_downloaded_timestamp, pathinfo)
|
||||
self._count('files_uploaded')
|
||||
|
||||
# FIXME consider whether it's correct to retrieve the filenode again.
|
||||
|
@ -345,10 +350,13 @@ class Uploader(QueueMixin):
|
|||
metadata = { "version":version }
|
||||
if last_downloaded_uri is not None:
|
||||
metadata["last_downloaded_uri"] = last_downloaded_uri
|
||||
metadata["last_downloaded_timestamp"] = now
|
||||
d2 = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata=metadata, overwrite=True)
|
||||
def add_db_entry(filenode):
|
||||
filecap = filenode.get_uri()
|
||||
self._db.did_upload_version(filecap, relpath_u, version, pathinfo)
|
||||
last_downloaded_uri = metadata.get('last_downloaded_uri', None)
|
||||
last_downloaded_timestamp = now
|
||||
self._db.did_upload_version(relpath_u, version, filecap, last_downloaded_uri, last_downloaded_timestamp, pathinfo)
|
||||
d2.addCallback(add_db_entry)
|
||||
return d2
|
||||
else:
|
||||
|
|
|
@ -16,12 +16,12 @@ CREATE TABLE local_files
|
|||
path VARCHAR(1024) PRIMARY KEY, -- UTF-8 filename relative to local magic folder dir
|
||||
-- note that size is before mtime and ctime here, but after in function parameters
|
||||
size INTEGER, -- ST_SIZE, or NULL if the file has been deleted
|
||||
mtime REAL, -- ST_MTIME
|
||||
ctime REAL, -- ST_CTIME
|
||||
mtime NUMBER, -- ST_MTIME
|
||||
ctime NUMBER, -- ST_CTIME
|
||||
version INTEGER,
|
||||
last_uploaded_uri VARCHAR(256) UNIQUE, -- URI:CHK:...
|
||||
last_downloaded_uri VARCHAR(256) UNIQUE, -- URI:CHK:...
|
||||
last_downloaded_timestamp REAL
|
||||
last_downloaded_timestamp TIMESTAMP
|
||||
);
|
||||
"""
|
||||
|
||||
|
@ -107,18 +107,17 @@ class MagicFolderDB(object):
|
|||
else:
|
||||
return row[0]
|
||||
|
||||
def did_upload_version(self, filecap, relpath_u, version, pathinfo):
|
||||
print "did_upload_version(%r, %r, %r, %r)" % (filecap, relpath_u, version, pathinfo)
|
||||
def did_upload_version(self, relpath_u, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, pathinfo):
|
||||
try:
|
||||
print "insert"
|
||||
self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?)",
|
||||
(relpath_u, pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, filecap, pathinfo.mtime))
|
||||
self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?,?,?)",
|
||||
(relpath_u, pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp))
|
||||
except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
|
||||
print "err... update"
|
||||
self.cursor.execute("UPDATE local_files"
|
||||
" SET size=?, mtime=?, ctime=?, version=?, last_downloaded_uri=?, last_downloaded_timestamp=?"
|
||||
" SET size=?, mtime=?, ctime=?, version=?, last_uploaded_uri=?, last_downloaded_uri=?, last_downloaded_timestamp=?"
|
||||
" WHERE path=?",
|
||||
(pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, filecap, pathinfo.mtime, relpath_u))
|
||||
(pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, relpath_u))
|
||||
self.connection.commit()
|
||||
print "commited"
|
||||
|
||||
|
@ -127,7 +126,6 @@ class MagicFolderDB(object):
|
|||
Returns true if the file's current pathinfo (size, mtime, and ctime) has
|
||||
changed from the pathinfo previously stored in the db.
|
||||
"""
|
||||
#print "is_new_file(%r, %r)" % (pathinfo, relpath_u)
|
||||
c = self.cursor
|
||||
c.execute("SELECT size, mtime, ctime"
|
||||
" FROM local_files"
|
||||
|
|
|
@ -65,10 +65,11 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
|
|||
fileutil.make_dirs(self.basedir)
|
||||
db = self._createdb()
|
||||
|
||||
|
||||
relpath1 = u"myFile1"
|
||||
pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
|
||||
exists=True, size=1, mtime=123, ctime=456)
|
||||
db.did_upload_version('URI:LIT:1', relpath1, 0, pathinfo)
|
||||
db.did_upload_version(relpath1, 0, 'URI:LIT:1', 'URI:LIT:0', 0, pathinfo)
|
||||
|
||||
c = db.cursor
|
||||
c.execute("SELECT size, mtime, ctime"
|
||||
|
@ -84,7 +85,7 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
|
|||
path2 = os.path.join(self.basedir, relpath2)
|
||||
fileutil.write(path2, "meow\n")
|
||||
pathinfo = fileutil.get_pathinfo(path2)
|
||||
db.did_upload_version('URI:LIT:2', relpath2, 0, pathinfo)
|
||||
db.did_upload_version(relpath2, 0, 'URI:LIT:2', 'URI:LIT:1', 0, pathinfo)
|
||||
self.failUnlessFalse(db.is_new_file(pathinfo, relpath2))
|
||||
|
||||
different_pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
|
||||
|
@ -128,7 +129,7 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
|
|||
d.addCallback(self._restart_client)
|
||||
|
||||
def _check_move_empty_tree(res):
|
||||
#print "_check_move_empty_tree"
|
||||
print "_check_move_empty_tree"
|
||||
self.mkdir_nonascii(empty_tree_dir)
|
||||
d2 = self.magicfolder.uploader.set_hook('processed')
|
||||
os.rename(empty_tree_dir, new_empty_tree_dir)
|
||||
|
@ -142,7 +143,7 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
|
|||
d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 1))
|
||||
|
||||
def _check_move_small_tree(res):
|
||||
#print "_check_move_small_tree"
|
||||
print "_check_move_small_tree"
|
||||
self.mkdir_nonascii(small_tree_dir)
|
||||
fileutil.write(abspath_expanduser_unicode(u"what", base=small_tree_dir), "say when")
|
||||
d2 = self.magicfolder.uploader.set_hook('processed', ignore_count=1)
|
||||
|
@ -157,7 +158,7 @@ class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqual
|
|||
d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
|
||||
|
||||
def _check_moved_tree_is_watched(res):
|
||||
#print "_check_moved_tree_is_watched"
|
||||
print "_check_moved_tree_is_watched"
|
||||
d2 = self.magicfolder.uploader.set_hook('processed')
|
||||
fileutil.write(abspath_expanduser_unicode(u"another", base=new_small_tree_dir), "file")
|
||||
self.notify(to_filepath(abspath_expanduser_unicode(u"another", base=new_small_tree_dir)), self.inotify.IN_CLOSE_WRITE)
|
||||
|
|
Loading…
Reference in New Issue