Patchwork [1,of,2] tags: log events related to tags cache

mail settings
Submitter Gregory Szorc
Date March 25, 2014, 4:28 a.m.
Message ID <>
Download mbox | patch
Permalink /patch/4055/
State Superseded
Commit 9ea132aee96ca0817d55e72013b9cfc644dc0088
Headers show


Gregory Szorc - March 25, 2014, 4:28 a.m.
# HG changeset patch
# User Gregory Szorc <>
# Date 1395532277 25200
#      Sat Mar 22 16:51:17 2014 -0700
# Node ID 2c1c27fd67fb5be2a35a9906de07ad2610ce5fd6
# Parent  3879ac3858ffd9bb46e19fcc3a2b31d7bb2b54c5
tags: log events related to tags cache

We now log when .hg/cache/tags data is built from scratch by reading
manifests and when the file is written.


diff --git a/mercurial/tags.py b/mercurial/tags.py
--- a/mercurial/tags.py
+++ b/mercurial/tags.py
@@ -10,16 +10,17 @@ 
 # Eventually, it could take care of updating (adding/removing/moving)
 # tags too.
 from node import nullid, bin, hex, short
 from i18n import _
 import encoding
 import error
 import errno
+import time
 def findglobaltags(ui, repo, alltags, tagtypes):
     '''Find global tags in repo by reading .hgtags from every head that
     has a distinct version of it, using a cache to avoid excess work.
     Updates the dicts alltags, tagtypes in place: alltags maps tag name
     to (node, hist) pair (see _readtags() below), and tagtypes maps tag
     name to tag type ("global" in this case).'''
     # This is so we can be lazy and assume alltags contains only global
@@ -229,16 +230,18 @@  def _readtagcache(ui, repo):
     # N.B. in case 4 (nodes destroyed), "new head" really means "newly
     # exposed".
     if not len(repo.file('.hgtags')):
         # No tags have ever been committed, so we can avoid a
         # potentially expensive search.
         return (repoheads, cachefnode, None, True)
+    starttime = time.time()
     newheads = [head
                 for head in repoheads
                 if head not in set(cacheheads)]
     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads.  Worst case: no cache
     # file, so newheads == repoheads.
@@ -246,27 +249,35 @@  def _readtagcache(ui, repo):
         cctx = repo[head]
         try:
             fnode = cctx.filenode('.hgtags')
             cachefnode[head] = fnode
         except error.LookupError:
             # no .hgtags file on this head
             continue
+    duration = time.time() - starttime
+    ui.log('tagscache',
+           'resolved %d tags cache entries from %d manifests in %0.2f seconds\n',
+           len(cachefnode), len(newheads), duration)
     # Caller has to iterate over all heads, but can use the filenodes in
     # cachefnode to get to each .hgtags revision quickly.
     return (repoheads, cachefnode, None, True)
 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
     try:
         cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
     except (OSError, IOError):
         return
+    ui.log('tagscache', 'writing tags cache file with %d heads and %d tags\n',
+            len(heads), len(cachetags))
     realheads = repo.heads()            # for sanity checks below
     for head in heads:
         # temporary sanity checks; these can probably be removed
         # once this code has been in crew for a few weeks
         assert head in repo.changelog.nodemap, \
                'trying to write non-existent node %s to tag cache' % short(head)
         assert head in realheads, \
                'trying to write non-head %s to tag cache' % short(head)