Patchwork [v2] run-tests: add support for xunit test reports

Submitter: Augie Fackler
Date: Aug. 6, 2014, 2:15 a.m.
Message ID: <049f590f1fd436b572d3.1407291301@101.17.16.172.in-addr.arpa>
Permalink: /patch/5282/
State: Accepted

Comments

Augie Fackler - Aug. 6, 2014, 2:15 a.m.
# HG changeset patch
# User Augie Fackler <raf@durin42.com>
# Date 1407287831 14400
#      Tue Aug 05 21:17:11 2014 -0400
# Node ID 049f590f1fd436b572d3b953792424251b53ed35
# Parent  739095270f48576711e7469cb9504b2ecc0f217b
run-tests: add support for xunit test reports

The Jenkins CI system understands xunit reports natively, so this will
be helpful for anyone that wants to use Jenkins for testing hg or
extensions that use run-tests.py for their testing.
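
As a quick illustrative sketch (not part of the patch or the original
message): a report written with "run-tests.py --xunit=xunit.xml" can be
read back with the same minidom module the patch itself uses, which shows
the structure Jenkins consumes. The file name xunit.xml is simply the one
used in the tests below; any path passed to --xunit works.

    # Summarize an xunit report produced by run-tests.py --xunit=xunit.xml
    from xml.dom import minidom

    doc = minidom.parse('xunit.xml')
    suite = doc.getElementsByTagName('testsuite')[0]
    # The patch records tests/errors/failures/skipped on <testsuite> and
    # name/time on each <testcase>; failing cases carry the output diff
    # in a CDATA block.
    print('ran %s, failed %s, skipped %s' % (suite.getAttribute('tests'),
                                             suite.getAttribute('failures'),
                                             suite.getAttribute('skipped')))
    for case in suite.getElementsByTagName('testcase'):
        print('%s  %ss' % (case.getAttribute('name'),
                           case.getAttribute('time')))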
Matt Mackall - Aug. 6, 2014, 8:08 a.m.
On Tue, 2014-08-05 at 22:15 -0400, Augie Fackler wrote:
> # HG changeset patch
> # User Augie Fackler <raf@durin42.com>
> # Date 1407287831 14400
> #      Tue Aug 05 21:17:11 2014 -0400
> # Node ID 049f590f1fd436b572d3b953792424251b53ed35
> # Parent  739095270f48576711e7469cb9504b2ecc0f217b
> run-tests: add support for xunit test reports
> 
> The Jenkins CI system understands xunit reports natively, so this will
> be helpful for anyone that wants to use Jenkins for testing hg or
> extensions that use run-tests.py for their testing.

Queued for default, thanks.

Patch

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -57,6 +57,7 @@ 
 import threading
 import killdaemons as killmod
 import Queue as queue
+from xml.dom import minidom
 import unittest
 
 processlock = threading.Lock()
@@ -190,6 +191,8 @@ 
              " (implies --keep-tmpdir)")
     parser.add_option("-v", "--verbose", action="store_true",
         help="output verbose messages")
+    parser.add_option("--xunit", type="string",
+                      help="record xunit results at specified path")
     parser.add_option("--view", type="string",
         help="external diff viewer")
     parser.add_option("--with-hg", type="string",
@@ -304,6 +307,20 @@ 
 
     return log(*msg)
 
+# Bytes that break XML even in a CDATA block: control characters 0-31
+# sans \t, \n and \r
+CDATA_EVIL = re.compile(r"[\000-\010\013\014\016-\037]")
+
+def cdatasafe(data):
+    """Make a string safe to include in a CDATA block.
+
+    Certain control characters are illegal in a CDATA block, and
+    there's no way to include a ]]> in a CDATA either. This function
+    replaces illegal bytes with ? and adds a space between the ]] so
+    that it won't break the CDATA block.
+    """
+    return CDATA_EVIL.sub('?', data).replace(']]>', '] ]>')
+
 def log(*msg):
     """Log something to stdout.
 
@@ -1085,6 +1102,9 @@ 
         self.times = []
         self._started = {}
         self._stopped = {}
+        # Data stored for the benefit of generating xunit reports.
+        self.successes = []
+        self.faildata = {}
 
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
@@ -1099,9 +1119,12 @@ 
             self.stream.write('!')
             iolock.release()
 
-    def addError(self, *args, **kwargs):
-        super(TestResult, self).addError(*args, **kwargs)
+    def addSuccess(self, test):
+        super(TestResult, self).addSuccess(test)
+        self.successes.append(test)
 
+    def addError(self, test, err):
+        super(TestResult, self).addError(test, err)
         if self._options.first:
             self.stop()
 
@@ -1141,6 +1164,8 @@ 
         """Record a mismatch in test output for a particular test."""
 
         accepted = False
+        failed = False
+        lines = []
 
         iolock.acquire()
         if self._options.nodiff:
@@ -1169,7 +1194,8 @@ 
                     else:
                         rename(test.errpath, '%s.out' % test.path)
                     accepted = True
-
+            if not accepted and not failed:
+                self.faildata[test.name] = ''.join(lines)
         iolock.release()
 
         return accepted
@@ -1344,6 +1370,35 @@ 
         for test, msg in result.errors:
             self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
+        if self._runner.options.xunit:
+            xuf = open(self._runner.options.xunit, 'wb')
+            try:
+                timesd = dict(
+                    (test, real) for test, cuser, csys, real in result.times)
+                doc = minidom.Document()
+                s = doc.createElement('testsuite')
+                s.setAttribute('name', 'run-tests')
+                s.setAttribute('tests', str(result.testsRun))
+                s.setAttribute('errors', "0") # TODO
+                s.setAttribute('failures', str(failed))
+                s.setAttribute('skipped', str(skipped + ignored))
+                doc.appendChild(s)
+                for tc in result.successes:
+                    t = doc.createElement('testcase')
+                    t.setAttribute('name', tc.name)
+                    t.setAttribute('time', '%.3f' % timesd[tc.name])
+                    s.appendChild(t)
+                for tc, err in sorted(result.faildata.iteritems()):
+                    t = doc.createElement('testcase')
+                    t.setAttribute('name', tc)
+                    t.setAttribute('time', '%.3f' % timesd[tc])
+                    cd = doc.createCDATASection(cdatasafe(err))
+                    t.appendChild(cd)
+                    s.appendChild(t)
+                xuf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))
+            finally:
+                xuf.close()
+
         self._runner._checkhglib('Tested')
 
         self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
diff --git a/tests/test-run-tests.t b/tests/test-run-tests.t
--- a/tests/test-run-tests.t
+++ b/tests/test-run-tests.t
@@ -48,6 +48,39 @@ 
   # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
   python hash seed: * (glob)
   [1]
+test --xunit support
+  $ $TESTDIR/run-tests.py --with-hg=`which hg` --xunit=xunit.xml
+  
+  --- $TESTTMP/test-failure.t
+  +++ $TESTTMP/test-failure.t.err
+  @@ -1,4 +1,4 @@
+     $ echo babar
+  -  rataxes
+  +  babar
+   This is a noop statement so that
+   this test is still more bytes than success.
+  
+  ERROR: test-failure.t output changed
+  !.
+  Failed test-failure.t: output changed
+  # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
+  python hash seed: * (glob)
+  [1]
+  $ cat xunit.xml
+  <?xml version="1.0" encoding="utf-8"?>
+  <testsuite errors="0" failures="1" name="run-tests" skipped="0" tests="2">
+    <testcase name="test-success.t" time="*"/> (glob)
+    <testcase name="test-failure.t" time="*"> (glob)
+  <![CDATA[--- $TESTTMP/test-failure.t
+  +++ $TESTTMP/test-failure.t.err
+  @@ -1,4 +1,4 @@
+     $ echo babar
+  -  rataxes
+  +  babar
+   This is a noop statement so that
+   this test is still more bytes than success.
+  ]]>  </testcase>
+  </testsuite>
 
 test for --retest
 ====================
@@ -291,6 +324,18 @@ 
   Skipped test-skip.t: irrelevant
   # Ran 1 tests, 2 skipped, 0 warned, 0 failed.
 
+Skips with xml
+  $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy \
+  >  --xunit=xunit.xml
+  i.s
+  Skipped test-skip.t: irrelevant
+  # Ran 1 tests, 2 skipped, 0 warned, 0 failed.
+  $ cat xunit.xml
+  <?xml version="1.0" encoding="utf-8"?>
+  <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="1">
+    <testcase name="test-success.t" time="0.010"/>
+  </testsuite>
+
 Missing skips or blacklisted skips don't count as executed:
   $ echo test-failure.t > blacklist
   $ $TESTDIR/run-tests.py --with-hg=`which hg` --blacklist=blacklist \