Patchwork [2,of,2,V4] perf: add asv benchmarks

login
register
mail settings
Submitter Philippe Pepiot
Date Nov. 16, 2016, 9:28 a.m.
Message ID <4740556dbb1b6212b31c.1479288488@sh71.tls.logilab.fr>
Download mbox | patch
Permalink /patch/17597/
State Accepted
Headers show

Comments

Philippe Pepiot - Nov. 16, 2016, 9:28 a.m.
# HG changeset patch
# User Philippe Pepiot <philippe.pepiot@logilab.fr>
# Date 1475136994 -7200
#      Thu Sep 29 10:16:34 2016 +0200
# Node ID 4740556dbb1b6212b31ce199feebd030ca886b24
# Parent  ab6e50ddc2c56dcf170991293005be6d6f80a232
perf: add asv benchmarks

Airspeed velocity (ASV) is a python framework for benchmarking Python packages
over their lifetime. The results are displayed in an interactive web frontend.

Add ASV benchmarks for mercurial that use the contrib/perf.py extension and
can be run against multiple reference repositories.

The benchmark suite now includes revsets from contrib/base-revsets.txt with
variants, perftags, perfstatus, perfmanifest and perfheads.

Installation requires asv>=0.2, python-hglib and virtualenv

This is part of PerformanceTrackingSuitePlan
https://www.mercurial-scm.org/wiki/PerformanceTrackingSuitePlan

Patch

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -49,6 +49,7 @@  mercurial.egg-info
 tags
 cscope.*
 .idea/*
+.asv/*
 i18n/hg.pot
 locale/*/LC_MESSAGES/hg.mo
 hgext/__index__.py
diff --git a/contrib/asv.conf.json b/contrib/asv.conf.json
new file mode 100644
--- /dev/null
+++ b/contrib/asv.conf.json
@@ -0,0 +1,13 @@ 
+{
+    "version": 1,
+    "project": "mercurial",
+    "project_url": "https://mercurial-scm.org/",
+    "repo": "..",
+    "branches": ["default", "stable"],
+    "environment_type": "virtualenv",
+    "show_commit_url": "https://www.mercurial-scm.org/repo/hg/rev/",
+    "benchmark_dir": "benchmarks",
+    "env_dir": "../.asv/env",
+    "results_dir": "../.asv/results",
+    "html_dir": "../.asv/html"
+}
diff --git a/contrib/benchmarks/__init__.py b/contrib/benchmarks/__init__.py
new file mode 100644
--- /dev/null
+++ b/contrib/benchmarks/__init__.py
@@ -0,0 +1,102 @@ 
+# __init__.py - asv benchmark suite
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''ASV (https://asv.readthedocs.io) benchmark suite
+
+Benchmarks are parameterized against reference repositories found in the
+directory pointed to by the REPOS_DIR environment variable.
+
+Invocation example:
+
+    $ export REPOS_DIR=~/hgperf/repos
+    # run suite on given revision
+    $ asv --config contrib/asv.conf.json run REV
+    # run suite on new changesets found in stable and default branch
+    $ asv --config contrib/asv.conf.json run NEW
+    # display a comparative result table of benchmark results between two given
+    # revisions
+    $ asv --config contrib/asv.conf.json compare REV1 REV2
+    # compute regression detection and generate ASV static website
+    $ asv --config contrib/asv.conf.json publish
+    # serve the static website
+    $ asv --config contrib/asv.conf.json preview
+'''
+
+from __future__ import absolute_import
+
+import functools
+import os
+import re
+
+from mercurial import (
+    extensions,
+    hg,
+    ui as uimod,
+)
+
+basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                          os.path.pardir, os.path.pardir))
+reposdir = os.environ['REPOS_DIR']
+reposnames = [name for name in os.listdir(reposdir)
+              if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
+if not reposnames:
+    raise ValueError("No repositories found in $REPOS_DIR")
+outputre = re.compile((r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
+                       r'\d+.\d+ \(best of \d+\)'))
+
+def runperfcommand(reponame, command, *args, **kwargs):
+    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
+    ui = uimod.ui()
+    repo = hg.repository(ui, os.path.join(reposdir, reponame))
+    perfext = extensions.load(ui, 'perfext',
+                              os.path.join(basedir, 'contrib', 'perf.py'))
+    cmd = getattr(perfext, command)
+    ui.pushbuffer()
+    cmd(ui, repo, *args, **kwargs)
+    output = ui.popbuffer()
+    match = outputre.search(output)
+    if not match:
+        raise ValueError("Invalid output {0}".format(output))
+    return float(match.group(1))
+
+def perfbench(repos=reposnames, name=None, params=None):
+    """decorator to declare ASV benchmark based on contrib/perf.py extension
+
+    An ASV benchmark is a python function with the given attributes:
+
+    __name__: should start with track_, time_ or mem_ to be collected by ASV
+    params and param_name: parameter matrix to display multiple graphs on the
+    same page.
+    pretty_name: If defined it's displayed in web-ui instead of __name__
+    (useful for revsets)
+    The module name is prepended to the benchmark name and displayed as
+    "category" in webui.
+
+    Benchmarks are automatically parameterized with repositories found in the
+    REPOS_DIR environment variable.
+
+    `params` is the param matrix in the form of a list of tuple
+    (param_name, [value0, value1])
+
+    For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
+    (a, c), (a, d), (b, c) and (b, d).
+    """
+    params = list(params or [])
+    params.insert(0, ("repo", repos))
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapped(repo, *args):
+            def perf(command, *a, **kw):
+                return runperfcommand(repo, command, *a, **kw)
+            return func(perf, *args)
+
+        wrapped.params = [p[1] for p in params]
+        wrapped.param_names = [p[0] for p in params]
+        wrapped.pretty_name = name
+        return wrapped
+    return decorator
diff --git a/contrib/benchmarks/perf.py b/contrib/benchmarks/perf.py
new file mode 100644
--- /dev/null
+++ b/contrib/benchmarks/perf.py
@@ -0,0 +1,26 @@ 
+# perf.py - asv benchmarks using contrib/perf.py extension
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import perfbench
+
+@perfbench()
+def track_tags(perf):
+    return perf("perftags")
+
+@perfbench()
+def track_status(perf):
+    return perf("perfstatus", unknown=False)
+
+@perfbench(params=[('rev', ['1000', '10000', 'tip'])])
+def track_manifest(perf, rev):
+    return perf("perfmanifest", rev)
+
+@perfbench()
+def track_heads(perf):
+    return perf("perfheads")
diff --git a/contrib/benchmarks/revset.py b/contrib/benchmarks/revset.py
new file mode 100644
--- /dev/null
+++ b/contrib/benchmarks/revset.py
@@ -0,0 +1,53 @@ 
+# revset.py - asv revset benchmarks
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''ASV revset benchmarks generated from contrib/base-revsets.txt
+
+Each revset benchmark is parameterized with variants (first, last, sort, ...)
+'''
+
+from __future__ import absolute_import
+
+import os
+import string
+import sys
+
+from . import basedir, perfbench
+
+def createrevsetbenchmark(baseset, variants=None):
+    if variants is None:
+        # Default variants
+        variants = ["plain", "first", "last", "sort", "sort+first",
+                    "sort+last"]
+    fname = "track_" + "_".join("".join([
+        c if c in string.digits + string.letters else " "
+        for c in baseset
+    ]).split())
+
+    def wrap(fname, baseset):
+        @perfbench(name=baseset, params=[("variant", variants)])
+        def f(perf, variant):
+            revset = baseset
+            if variant != "plain":
+                for var in variant.split("+"):
+                    revset = "%s(%s)" % (var, revset)
+            return perf("perfrevset", revset)
+        f.__name__ = fname
+        return f
+    return wrap(fname, baseset)
+
+def initializerevsetbenchmarks():
+    mod = sys.modules[__name__]
+    with open(os.path.join(basedir, 'contrib', 'base-revsets.txt'),
+              'rb') as fh:
+        for line in fh:
+            baseset = line.strip()
+            if baseset and not baseset.startswith('#'):
+                func = createrevsetbenchmark(baseset)
+                setattr(mod, func.__name__, func)
+
+initializerevsetbenchmarks()