Patchwork [2,of,3] perf: teach perfrevlogrevision about sparse reading

login
register
mail settings
Submitter Boris Feld
Date Nov. 6, 2018, 10:34 a.m.
Message ID <59d548edb4ce2dafb989.1541500462@Laptop-Boris.lan>
Download mbox | patch
Permalink /patch/36413/
State Accepted
Headers show

Comments

Boris Feld - Nov. 6, 2018, 10:34 a.m.
# HG changeset patch
# User Boris Feld <boris.feld@octobus.net>
# Date 1541498663 -3600
#      Tue Nov 06 11:04:23 2018 +0100
# Node ID 59d548edb4ce2dafb989ffc2d0a95fb4bb19d2ee
# Parent  92466f201ed80783a2e1d37ad4fd1ff80df1f36a
# EXP-Topic sparse-prefrevlogrevision
# Available At https://bitbucket.org/octobus/mercurial-devel/
#              hg pull https://bitbucket.org/octobus/mercurial-devel/ -r 59d548edb4ce
perf: teach perfrevlogrevision about sparse reading

Before this change, chunks were always read in a single block, even in the
sparse-read/sparse-revlog case. This gave a false view of the performance and
could lead to memory consumption issues.

Patch

diff --git a/contrib/perf.py b/contrib/perf.py
--- a/contrib/perf.py
+++ b/contrib/perf.py
@@ -1723,17 +1723,18 @@  def perfrevlogrevision(ui, repo, file_, 
         inline = r._inline
         iosize = r._io.size
         buffer = util.buffer
-        offset = start(chain[0])
 
         chunks = []
         ladd = chunks.append
-
-        for rev in chain:
-            chunkstart = start(rev)
-            if inline:
-                chunkstart += (rev + 1) * iosize
-            chunklength = length(rev)
-            ladd(buffer(data, chunkstart - offset, chunklength))
+        for idx, item in enumerate(chain):
+            offset = start(item[0])
+            bits = data[idx]
+            for rev in item:
+                chunkstart = start(rev)
+                if inline:
+                    chunkstart += (rev + 1) * iosize
+                chunklength = length(rev)
+                ladd(buffer(bits, chunkstart - offset, chunklength))
 
         return chunks
 
@@ -1745,7 +1746,8 @@  def perfrevlogrevision(ui, repo, file_, 
     def doread(chain):
         if not cache:
             r.clearcaches()
-        segmentforrevs(chain[0], chain[-1])
+        for item in slicedchain:
+            segmentforrevs(item[0], item[-1])
 
     def dorawchunks(data, chain):
         if not cache:
@@ -1772,9 +1774,20 @@  def perfrevlogrevision(ui, repo, file_, 
             r.clearcaches()
         r.revision(node)
 
+    try:
+        from mercurial.revlogutils.deltas import slicechunk
+    except ImportError:
+        slicechunk = getattr(revlog, '_slicechunk', None)
+
+
+    size = r.length(rev)
     chain = r._deltachain(rev)[0]
-    data = segmentforrevs(chain[0], chain[-1])[1]
-    rawchunks = getrawchunks(data, chain)
+    if not getattr(r, '_withsparseread', False):
+        slicedchain = (chain,)
+    else:
+        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
+    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
+    rawchunks = getrawchunks(data, slicedchain)
     bins = r._chunks(chain)
     text = bytes(bins[0])
     bins = bins[1:]
@@ -1784,7 +1797,7 @@  def perfrevlogrevision(ui, repo, file_, 
         (lambda: dorevision(), b'full'),
         (lambda: dodeltachain(rev), b'deltachain'),
         (lambda: doread(chain), b'read'),
-        (lambda: dorawchunks(data, chain), b'rawchunks'),
+        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
         (lambda: dodecompress(rawchunks), b'decompress'),
         (lambda: dopatch(text, bins), b'patch'),
         (lambda: dohash(text), b'hash'),