@@ -45,7 +45,7 @@
> # Touching files truncated at "transaction.abort" causes
> # forcible re-loading invalidated filecache properties
> # (including repo.changelog)
- > for f, o, _ignore in entries:
+ > for f, o in entries:
> if o or not unlink:
> os.utime(opener.join(f), (0.0, 0.0))
> def extsetup(ui):
@@ -56,7 +56,7 @@
unlink=True,
checkambigfiles=None,
):
- for f, o, _ignore in entries:
+ for f, o in entries:
if o or not unlink:
checkambig = checkambigfiles and (f, b'') in checkambigfiles
try:
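
The hunks above are the abort/replay path, where the extra element was already ignored; all the transaction keeps per file is its name and the size it had before the transaction. A minimal sketch of what those two fields drive on abort, with simplified names and none of the vfs/opener or checkambig handling of the real _playback:

    import os

    def playback(entries, unlink=True):
        # entries are (filename, offset) pairs as recorded in the journal;
        # offset is the file's size before the transaction touched it
        for f, o in entries:
            if o or not unlink:
                # the file pre-existed (or created files should be kept):
                # cut it back to its recorded size
                with open(f, 'r+b') as fp:
                    fp.truncate(o)
            else:
                # offset 0 with unlink requested: the file was created by
                # the transaction, so remove it outright
                os.unlink(f)
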
@@ -243,25 +243,25 @@
This is used by strip to delay vision of strip offset. The transaction
sees either none or all of the strip actions to be done."""
q = self._queue.pop()
- for f, o, data in q:
- self._addentry(f, o, data)
+ for f, o in q:
+ self._addentry(f, o)
@active
- def add(self, file, offset, data=None):
+ def add(self, file, offset):
"""record the state of an append-only file before update"""
if file in self._map or file in self._backupmap:
return
if self._queue:
- self._queue[-1].append((file, offset, data))
+ self._queue[-1].append((file, offset))
return
- self._addentry(file, offset, data)
+ self._addentry(file, offset)
- def _addentry(self, file, offset, data):
+ def _addentry(self, file, offset):
"""add a append-only entry to memory and on-disk state"""
if file in self._map or file in self._backupmap:
return
- self._entries.append((file, offset, data))
+ self._entries.append((file, offset))
self._map[file] = len(self._entries) - 1
# add enough data to the journal to do the truncate
self._file.write(b"%s\0%d\n" % (file, offset))
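
Nothing about the on-disk journal changes here: each entry is still serialized as a single name, NUL byte, decimal offset line, so the dropped in-memory slot was never recoverable after a crash anyway (the recovery hunk further down always filled it with None). A tiny round trip of that format; the helper names and the file name are made up for illustration:

    def encodeentry(name, offset):
        # the line format written by _addentry and replace above
        return b"%s\0%d\n" % (name, offset)

    def decodeentry(line):
        # inverse of the above, mirroring the journal-recovery parse below
        name, offset = line.split(b'\0')
        return name, int(offset)

    assert decodeentry(encodeentry(b'data/foo.txt.i', 1234)) == (b'data/foo.txt.i', 1234)
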
@@ -403,7 +403,7 @@
return None
@active
- def replace(self, file, offset, data=None):
+ def replace(self, file, offset):
'''
replace can only replace already committed entries
that are not pending in the queue
@@ -412,7 +412,7 @@
if file not in self._map:
raise KeyError(file)
index = self._map[file]
- self._entries[index] = (file, offset, data)
+ self._entries[index] = (file, offset)
self._file.write(b"%s\0%d\n" % (file, offset))
self._file.flush()
@@ -696,7 +696,7 @@
for l in lines:
try:
f, o = l.split(b'\0')
- entries.append((f, int(o), None))
+ entries.append((f, int(o)))
except ValueError:
report(
_(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
@@ -2005,16 +2005,9 @@
raise error.RevlogError(
_(b"%s not found in the transaction") % self.indexfile
)
-
- trindex = trinfo[2]
- if trindex is not None:
- dataoff = self.start(trindex)
- else:
- # revlog was stripped at start of transaction, use all leftover data
- trindex = len(self) - 1
- dataoff = self.end(tiprev)
-
- tr.add(self.datafile, dataoff)
+ troffset = trinfo[1]
+ trindex = 0
+ tr.add(self.datafile, 0)
if fp:
fp.flush()
@@ -2026,6 +2019,8 @@
with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
for r in self:
dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
+ if troffset <= self.start(r):
+ trindex = r
with self._indexfp(b'w') as fp:
self.version &= ~FLAG_INLINE_DATA
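
These two hunks are the only real consumer of the dropped element: the old code stashed a revision index in the transaction entry (trinfo[2]) and used it to locate the start of the transaction's data when splitting an oversized inline revlog. The rewrite gets by with the offset that is recorded anyway: the freshly created datafile is registered at offset 0, so an abort simply removes it (per the unlink-at-zero rule in the playback hunk at the top), and trindex is recomputed while the data is copied, being updated for every revision whose data starts at or after troffset. A rough picture with made-up numbers:

    troffset = 300                    # trinfo[1]: index size when tr.add() ran
    starts = [0, 120, 260, 410, 555]  # assumed self.start(r) for r in 0..4
    trindex = 0
    for r, s in enumerate(starts):
        if troffset <= s:
            trindex = r
    # trindex now lands on a revision written inside the transaction and is
    # consumed further down in _enforceinlinesize (outside these hunks) when
    # the index file's recorded offset is replaced.
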
@@ -2361,7 +2356,7 @@
ifh.write(entry)
else:
offset += curr * self._io.size
- transaction.add(self.indexfile, offset, curr)
+ transaction.add(self.indexfile, offset)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
@@ -2397,10 +2392,10 @@
ifh = self._indexfp(b"a+")
isize = r * self._io.size
if self._inline:
- transaction.add(self.indexfile, end + isize, r)
+ transaction.add(self.indexfile, end + isize)
dfh = None
else:
- transaction.add(self.indexfile, isize, r)
+ transaction.add(self.indexfile, isize)
transaction.add(self.datafile, end)
dfh = self._datafp(b"a+")
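
Both write paths above now record only the current size of the file(s) before the new revisions are appended, which is all the truncate-based rollback needs; the revision index that used to ride along as the third argument is no longer needed once _enforceinlinesize recomputes it. Rough arithmetic behind the recorded offsets, with invented numbers and assuming the 64-byte v1 index entry format:

    r = 7                  # revisions already in the revlog
    entrysize = 64         # one index entry (self._io.size)
    end = 9000             # bytes of revision data already stored
    isize = r * entrysize  # 448: the index-entry portion

    # inline revlog: entries and data share one file, so the recorded offset
    # must cover both -> transaction.add(indexfile, end + isize)
    # split revlog:  two files, two entries:
    #                transaction.add(indexfile, isize)
    #                transaction.add(datafile, end)
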
@@ -220,7 +220,7 @@
tr.endgroup()
for i in pycompat.xrange(offset, len(tr._entries)):
- file, troffset, ignore = tr._entries[i]
+ file, troffset = tr._entries[i]
with repo.svfs(file, b'a', checkambig=True) as fp:
fp.truncate(troffset)
if troffset == 0: