Submitter | Anurag Goel |
---|---|
Date | Aug. 1, 2014, 5:49 p.m. |
Message ID | <425ed96bee462e1622ab.1406915368@ubuntu.ubuntu-domain> |
Permalink | /patch/5217/ |
State | Changes Requested |
Comments
On 08/01/2014 10:49 AM, Anurag Goel wrote:
> # HG changeset patch
> # User anuraggoel <anurag.dsps@gmail.com>
> # Date 1404189244 -19800
> #      Tue Jul 01 10:04:04 2014 +0530
> # Node ID 425ed96bee462e1622ab4ec5c71fc27a11171480
> # Parent  40af42b473ba636bb1e4dff37a82df344f8f27b3
> run-tests: added '--json' functionality to store test result data in json file
>
> This patch adds a new functionality, '--json'. While testing, if '--json'
> is enabled then the test result data gets stored in a newly created
> "report.json" file in the following format.
>
> testreport ={
>     "test-success.t": {
>         "result": "success",
>         "time": "2.04"
>     },
>     "test-failure.t": {
>         "result": "failure",
>         "time": "4.43"
>     },
>     "test-skip.t": {
>         "result": "skip",
>         "time": "3.54"
>     }
> }
>
> This "report.json" file will later be accessed by an html/javascript file
> for graph usage.
>
> [...]
> @@ -486,9 +493,10 @@
>              success = False
>
>          if success:
> +            successinfo = 'success'
>              result.addSuccess(self)
>          finally:
> -            result.stopTest(self, interrupted=self._aborted)
> +            result.stopTest(self, successinfo, interrupted=self._aborted)

This sounds suboptimal. The result object should -already- be able to store
all the information necessary to produce the JSON. It is able to do so
because it is the -result- object, and all test-related results are recorded
in it.

> [...]
> @@ -1167,10 +1179,15 @@
>
>          self._started[test.name] = time.time()
>
> -    def stopTest(self, test, interrupted=False):
> +    def stopTest(self, test, successinfo, interrupted=False):
>          super(TestResult, self).stopTest(test)
>
> -        self.times.append((test.name, time.time() - self._started[test.name]))
> +        testtime = time.time() - self._started[test.name]
> +
> +        testresult = {'result': successinfo, 'time': ('%0.2f' % (testtime))}
> +        self.outcome.append((test.name, testresult))
> +
> +        self.times.append((test.name, testtime))

Second facet of the same problem. If you already have the timing and result
information for all tests, you can generate the JSON content -on-demand- when
requested to. You can produce the JSON from all the data stored in the result
object itself.

(Bonus review: TestResult.stopTest is a method from the Python stdlib;
changing its signature kind of defeats the purpose of using the standard
object.)
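To make the "generate the JSON on demand" idea concrete, here is a minimal
sketch, not part of the submitted patch. The helper name buildreport is
illustrative, as is the assumption that the timing list and the entries in
result.failures/result.skipped are keyed by the same test name:

    import json

    def buildreport(result, times):
        """Sketch: derive the JSON payload on demand from a finished result.

        'result' is a unittest.TestResult (or subclass); 'times' is a list of
        (test name, duration) pairs, like the result.times list the patch
        already maintains.
        """
        failed = set(str(t) for t, _err in result.failures + result.errors)
        skipped = set(str(t) for t, _reason in result.skipped)

        testdata = {}
        for name, duration in times:
            if name in failed:
                outcome = 'failure'
            elif name in skipped:
                outcome = 'skip'
            else:
                outcome = 'success'
            testdata[name] = {'result': outcome, 'time': '%0.2f' % duration}
        return 'testreport =' + json.dumps(testdata, sort_keys=True, indent=4)

Nothing here needs to be pushed into the result object at stopTest time; the
report is assembled only when --json asks for it.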
On 8/1/14, 10:49 AM, Anurag Goel wrote:
> @@ -1341,9 +1358,23 @@
>                               os.environ['PYTHONHASHSEED'])
>          if self._runner.options.time:
>              self.printtimes(result.times)
> +        if self._runner.options.json:
> +            self.getjsonfile(result.outcome)
>
>          return result
>
> +    def getjsonfile(self, outcome):
> +        """Store test result info in json format in report.json file."""
> +
> +        os.chdir(self._runner._testdir)

This is a needless chdir. Instead:

    open(os.path.join(self._runner._testdir, 'report.json'), 'w')

> +        fp = open('report.json', 'w')
> +        try:
> +            testdata = dict(outcome)
> +            fp.writelines(("testreport =", (json.dumps(testdata,
> +                        sort_keys=True, indent=4))))
> +        finally:
> +            fp.close()
> +
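Applying that suggestion to the method as submitted would look roughly like
the sketch below; apart from the os.path.join path handling (and dropping the
chdir), everything is taken from the patch as posted:

    def getjsonfile(self, outcome):
        """Store test result info in json format in report.json file."""

        # Write into the test directory without changing the process cwd.
        jsonpath = os.path.join(self._runner._testdir, 'report.json')
        fp = open(jsonpath, 'w')
        try:
            testdata = dict(outcome)
            fp.writelines(("testreport =", (json.dumps(testdata,
                        sort_keys=True, indent=4))))
        finally:
            fp.close()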
On Mon, Aug 4, 2014 at 11:50 PM, Pierre-Yves David
<pierre-yves.david@ens-lyon.org> wrote:
> On 08/01/2014 10:49 AM, Anurag Goel wrote:
>> [...]
>> -    def stopTest(self, test, interrupted=False):
>> +    def stopTest(self, test, successinfo, interrupted=False):
>>          super(TestResult, self).stopTest(test)
>>
>> -        self.times.append((test.name, time.time() - self._started[test.name]))
>> +        testtime = time.time() - self._started[test.name]
>> +
>> +        testresult = {'result': successinfo, 'time': ('%0.2f' % (testtime))}
>> +        self.outcome.append((test.name, testresult))
>> +
>> +        self.times.append((test.name, testtime))
>
> Second facet of the same problem. If you already have the timing and result
> information for all tests, you can generate the JSON content -on-demand- when
> requested to. You can produce the JSON from all the data stored in the result
> object itself.
>
> (Bonus review: TestResult.stopTest is a method from the Python stdlib;
> changing its signature kind of defeats the purpose of using the standard
> object.)

Could you please tell me how I can access the "successinfo" of a test file by
using the result object here?
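One way to record the per-test outcome without touching the stdlib stopTest
signature is to hook the add* callbacks instead. The sketch below is
illustrative only; it uses a standalone class built on plain unittest rather
than the run-tests.py internals, and the 'outcomes' attribute is an assumed
name:

    import time
    import unittest

    class OutcomeRecordingResult(unittest.TestResult):
        """Sketch: record per-test outcome and timing via the add* hooks."""

        def __init__(self, *args, **kwargs):
            super(OutcomeRecordingResult, self).__init__(*args, **kwargs)
            self.outcomes = {}   # test id -> 'success' / 'skip' / 'failure'
            self.times = {}      # test id -> duration in seconds
            self._started = {}

        def startTest(self, test):
            super(OutcomeRecordingResult, self).startTest(test)
            self._started[test.id()] = time.time()

        def stopTest(self, test):
            # stdlib-compatible signature; only timing bookkeeping happens here
            super(OutcomeRecordingResult, self).stopTest(test)
            self.times[test.id()] = time.time() - self._started.pop(test.id())

        def addSuccess(self, test):
            super(OutcomeRecordingResult, self).addSuccess(test)
            self.outcomes[test.id()] = 'success'

        def addSkip(self, test, reason):
            super(OutcomeRecordingResult, self).addSkip(test, reason)
            self.outcomes[test.id()] = 'skip'

        def addFailure(self, test, err):
            super(OutcomeRecordingResult, self).addFailure(test, err)
            self.outcomes[test.id()] = 'failure'

A result object shaped like this already answers "what was the successinfo of
test X": it is simply result.outcomes[name], filled in by whichever add*
method fired for that test, so stopTest does not need an extra argument.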
Patch
diff -r 40af42b473ba -r 425ed96bee46 tests/run-tests.py
--- a/tests/run-tests.py  Thu Jun 26 22:08:30 2014 +0530
+++ b/tests/run-tests.py  Tue Jul 01 10:04:04 2014 +0530
@@ -58,6 +58,7 @@
 import killdaemons as killmod
 import Queue as queue
 import unittest
+import simplejson as json
 
 processlock = threading.Lock()
 
@@ -185,6 +186,8 @@
              " (default: $%s or %d)" % defaults['timeout'])
     parser.add_option("--time", action="store_true",
         help="time how long each test takes")
+    parser.add_option("--json", action="store_true",
+        help="store test result data in 'report.json' file")
     parser.add_option("--tmpdir", type="string",
         help="run tests in the given temporary directory"
              " (implies --keep-tmpdir)")
@@ -453,6 +456,9 @@
             return
 
         success = False
+
+        # This carries the test success info corresponding to the testcase
+        successinfo = 'failure'
         try:
             self.runTest()
         except KeyboardInterrupt:
@@ -460,6 +466,7 @@
             raise
         except SkipTest, e:
             result.addSkip(self, str(e))
+            successinfo = 'skip'
         except IgnoreTest, e:
             result.addIgnore(self, str(e))
         except WarnTest, e:
@@ -486,9 +493,10 @@
             success = False
 
         if success:
+            successinfo = 'success'
             result.addSuccess(self)
         finally:
-            result.stopTest(self, interrupted=self._aborted)
+            result.stopTest(self, successinfo, interrupted=self._aborted)
 
     def runTest(self):
         """Run this test instance.
@@ -1075,6 +1083,10 @@
         self.warned = []
 
         self.times = []
+
+        # Stores success info and timing data corresponding to each testcase
+        self.outcome = []
+
         self._started = {}
 
     def addFailure(self, test, reason):
@@ -1167,10 +1179,15 @@
 
         self._started[test.name] = time.time()
 
-    def stopTest(self, test, interrupted=False):
+    def stopTest(self, test, successinfo, interrupted=False):
         super(TestResult, self).stopTest(test)
 
-        self.times.append((test.name, time.time() - self._started[test.name]))
+        testtime = time.time() - self._started[test.name]
+
+        testresult = {'result': successinfo, 'time': ('%0.2f' % (testtime))}
+        self.outcome.append((test.name, testresult))
+
+        self.times.append((test.name, testtime))
         del self._started[test.name]
 
         if interrupted:
@@ -1341,9 +1358,23 @@
                              os.environ['PYTHONHASHSEED'])
         if self._runner.options.time:
             self.printtimes(result.times)
+        if self._runner.options.json:
+            self.getjsonfile(result.outcome)
 
         return result
 
+    def getjsonfile(self, outcome):
+        """Store test result info in json format in report.json file."""
+
+        os.chdir(self._runner._testdir)
+        fp = open('report.json', 'w')
+        try:
+            testdata = dict(outcome)
+            fp.writelines(("testreport =", (json.dumps(testdata,
+                        sort_keys=True, indent=4))))
+        finally:
+            fp.close()
+
     def printtimes(self, times):
         self.stream.writeln('# Producing time report')
         times.sort(key=lambda t: (t[1], t[0]), reverse=True)
diff -r 40af42b473ba -r 425ed96bee46 tests/test-run-tests.t
--- a/tests/test-run-tests.t  Thu Jun 26 22:08:30 2014 +0530
+++ b/tests/test-run-tests.t  Tue Jul 01 10:04:04 2014 +0530
@@ -201,3 +201,37 @@
   # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
   python hash seed: * (glob)
   [1]
+
+test for --json
+==================
+
+  $ $TESTDIR/run-tests.py test-success.t test-failure.t --json
+
+  --- $TESTTMP/test-failure.t
+  +++ $TESTTMP/test-failure.t.err
+  @@ -1,2 +1,2 @@
+     $ echo babar
+  -  rataxes
+  +  babar
+
+  ERROR: test-failure.t output changed
+  !.
+  Failed test-failure.t: output changed
+  # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+  $ cat report.json
+  testreport ={
+      "test-failure.t": [\{] (re)
+          "result": "failure",
+          "time": "[\d\.]{4}" (re)
+      },
+      "test-success.t": [\{] (re)
+          "result": "success",
+          "time": "[\d\.]{4}" (re)
+      }
+  } (no-eol)
+
+(removing json file)
+  $ rm report.json
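For consumers of the file (the html/javascript graphing mentioned in the patch
description, or any script), note that the output is not bare JSON: it starts
with a "testreport =" prefix. A small illustrative reader in Python, assuming
only the file layout shown above:

    import json

    fp = open('report.json')
    try:
        content = fp.read()
    finally:
        fp.close()

    # Strip the leading "testreport =" assignment before parsing the JSON body.
    testdata = json.loads(content.split('=', 1)[1])
    for name, info in sorted(testdata.items()):
        print '%s: %s (%ss)' % (name, info['result'], info['time'])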