mutable: fix use of storage API

parent c4f7412f1c
commit 7e43c7b5f9
@@ -181,12 +181,15 @@ class RIStorageServer(RemoteInterface):
         Each share can have a separate test vector (i.e. a list of
         comparisons to perform). If all vectors for all shares pass, then all
         writes for all shares are recorded. Each comparison is a 4-tuple of
-        (offset, length, operator, specimen), which effectively does a
-        read(offset, length) and then compares the result against the
-        specimen using the given equality/inequality operator. Reads from the
-        end of the container are truncated, and missing shares behave like
-        empty ones, so to assert that a share doesn't exist (for use when
-        creating a new share), use (0, 1, 'eq', '').
+        (offset, length, operator, specimen), which effectively does a bool(
+        (read(offset, length)) OPERATOR specimen ) and only performs the
+        write if all these evaluate to True. Basic test-and-set uses 'eq'.
+        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
+        Write-if-same-or-newer uses 'le'.
+
+        Reads from the end of the container are truncated, and missing shares
+        behave like empty ones, so to assert that a share doesn't exist (for
+        use when creating a new share), use (0, 1, 'eq', '').

         The write vector will be applied to the given share, expanding it if
         necessary. A write vector applied to a share number that did not
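A minimal sketch of the test-vector semantics described in the docstring above. The truncating-read behavior and the operators 'eq', 'lt', and 'le' come from the docstring; the remaining operator names, and all function and variable names, are illustrative assumptions, not the actual server implementation:

    import operator

    # 'eq', 'lt', and 'le' appear in the docstring above; the other three
    # are assumed here for completeness.
    OPERATORS = {"lt": operator.lt, "le": operator.le, "eq": operator.eq,
                 "ne": operator.ne, "ge": operator.ge, "gt": operator.gt}

    def read(share_data, offset, length):
        # Reads past the end of the container are truncated; a missing
        # share behaves like an empty one.
        return share_data[offset:offset+length]

    def test_vector_passes(share_data, testv):
        # Each comparison is (offset, length, operator, specimen); the
        # write is performed only if every comparison evaluates to True.
        return all(OPERATORS[op](read(share_data, offset, length), specimen)
                   for (offset, length, op, specimen) in testv)

    # Asserting that a share does not yet exist, per the docstring:
    assert test_vector_passes("", [(0, 1, "eq", "")])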
@@ -272,7 +272,8 @@ class Retrieve:
             peer_storage_servers[peerid] = ss
             return ss
         d.addCallback(_got_storageserver)
-        d.addCallback(lambda ss: ss.callRemote("readv_slots", [(0, readsize)]))
+        d.addCallback(lambda ss: ss.callRemote("slot_readv", storage_index,
+                                               [], [(0, readsize)]))
         d.addCallback(self._got_results, peerid, readsize)
         d.addErrback(self._query_failed, peerid, (conn, storage_index,
                                                   peer_storage_servers))
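The old readv_slots call took only a read vector; the renamed slot_readv names the slot explicitly. A stub of the assumed signature, reconstructed from the two call sites in this commit rather than from the interface definition itself:

    def slot_readv(storage_index, shares, readv):
        """Assumed shape of the renamed remote method: read the
        (offset, length) vectors in 'readv' from the slot named by
        'storage_index'. Both call sites in this commit pass shares=[],
        presumably meaning 'no particular share' (e.g. read from every
        share present); that detail is not shown in this diff."""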
@@ -676,7 +677,8 @@ class Publish:
         peer_storage_servers = {}
         dl = []
         for (permutedid, peerid, conn) in partial_peerlist:
-            d = self._do_query(conn, peerid, peer_storage_servers)
+            d = self._do_query(conn, peerid, peer_storage_servers,
+                               storage_index)
             d.addCallback(self._got_query_results,
                           peerid, permutedid,
                           reachable_peers, current_share_peers)
@@ -688,11 +690,11 @@ class Publish:
             # TODO: add an errback to, probably to ignore that peer
         return d

-    def _do_query(self, conn, peerid, peer_storage_servers):
+    def _do_query(self, conn, peerid, peer_storage_servers, storage_index):
         d = conn.callRemote("get_service", "storageserver")
         def _got_storageserver(ss):
             peer_storage_servers[peerid] = ss
-            return ss.callRemote("readv_slots", [(0, 2000)])
+            return ss.callRemote("slot_readv", storage_index, [], [(0, 2000)])
         d.addCallback(_got_storageserver)
         return d

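The two Publish hunks above are one change: storage_index is threaded from the query loop down to the wire call, because slot_readv (unlike readv_slots) must be told which slot to read. A self-contained sketch of that threading; everything here is faked except the two call shapes, which are taken verbatim from the diff:

    from twisted.internet import defer

    class FakeStorageServer:
        def callRemote(self, methname, storage_index, shares, readv):
            # Stand-in for the remote slot_readv; returns no shares.
            return defer.succeed({})

    def _do_query(conn, peerid, peer_storage_servers, storage_index):
        ss = FakeStorageServer()          # conn.callRemote(...) faked out
        peer_storage_servers[peerid] = ss
        return ss.callRemote("slot_readv", storage_index, [], [(0, 2000)])

    d = _do_query(None, "peerid", {}, "storage_index")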
@@ -770,7 +772,7 @@ class Publish:

         for shnum, peers in target_map.items():
             for (peerid, old_seqnum, old_root_hash) in peers:
-                testv = [(0, len(my_checkstring), "ge", my_checkstring)]
+                testv = [(0, len(my_checkstring), "le", my_checkstring)]
                 new_share = self._new_shares[shnum]
                 writev = [(0, new_share)]
                 if peerid not in peer_messages:
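The operator flip from 'ge' to 'le' matches the corrected docstring: the server computes bool(read(offset, length) OPERATOR specimen), with the share's current data on the left and our specimen on the right, so write-if-same-or-newer needs 'le' (stored checkstring <= ours). A worked illustration, assuming for the sake of the example that the checkstring begins with a big-endian sequence number so lexicographic comparison tracks numeric order (the real checkstring layout is not shown in this diff):

    import struct

    def checkstring(seqnum):
        # Assumed layout for illustration only: big-endian seqnum prefix.
        return struct.pack(">Q", seqnum)

    stored, ours = checkstring(4), checkstring(5)  # peer holds version 4
    assert stored <= ours          # 'le' passes: our version 5 is newer
    assert not (stored >= ours)    # the old 'ge' would have refused it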
@@ -64,7 +64,7 @@ class FakeFilenode(mutable.MutableFileNode):
         return "fake readonly"

 class FakePublish(mutable.Publish):
-    def _do_query(self, conn, peerid, peer_storage_servers):
+    def _do_query(self, conn, peerid, peer_storage_servers, storage_index):
         assert conn[0] == peerid
         shares = self._peers[peerid]
         return defer.succeed(shares)