Patchwork [03 of 16] exchangeutil: turn push into an object

Submitter Pierre-Yves David
Date April 17, 2013, 3:58 p.m.
Message ID <2aef99555e21a7364bf0.1366214318@crater1.logilab.fr>
Permalink /patch/1392/
State Deferred, archived

Comments

Pierre-Yves David - April 17, 2013, 3:58 p.m.
# HG changeset patch
# User Pierre-Yves David <pierre-yves.david@logilab.fr>
# Date 1366204127 -7200
#      Wed Apr 17 15:08:47 2013 +0200
# Node ID 2aef99555e21a7364bf02f3d66fe69978608d284
# Parent  4f01e4913967eb0961f07f7d77e026dbac5a439c
exchangeutil: turn push into an object

This moves toward a small, stateful, one-shot object where each step is isolated.
Smaller functions are easier to maintain and wrap.
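
As a rough illustration of the direction (a hypothetical sketch, not part of
this patch: the step methods and constructor attributes below are invented,
and the patch itself only wraps the existing push() body in
pushoperation.perform()):

    # Illustrative sketch of the "small, stateful, one-shot object" pattern.
    # None of the step methods below exist in this patch; they only show how
    # shared state on the object could replace the local variables currently
    # threaded through one long function.
    class pushoperation(object):
        """Hold the state of one push so each step can become a small method."""

        def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
            self.repo = repo
            self.remote = remote
            self.force = force
            self.revs = revs
            self.newbranch = newbranch
            self.outgoing = None   # filled in by the discovery step
            self.ret = None        # overall result, same meaning as push()

        def perform(self):
            # each step reads and writes the shared state above
            self.discover()
            self.pushchangegroup()
            self.syncphases()
            self.syncbookmarks()
            return self.ret

        # hypothetical step methods, stubs here: small, isolated, easy to wrap
        def discover(self):
            pass

        def pushchangegroup(self):
            pass

        def syncphases(self):
            pass

        def syncbookmarks(self):
            pass

    def push(repo, remote, force=False, revs=None, newbranch=False):
        # one-shot object: construct, run once, throw away
        return pushoperation(repo, remote, force, revs, newbranch).perform()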

Patch

diff --git a/mercurial/exchangeutil.py b/mercurial/exchangeutil.py
--- a/mercurial/exchangeutil.py
+++ b/mercurial/exchangeutil.py
@@ -9,211 +9,214 @@  from i18n import _
 from node import hex, nullid
 import util, scmutil, base85
 import discovery, phases, obsolete, bookmarks
 
 
-def push(repo, remote, force=False, revs=None, newbranch=False):
-    '''Push outgoing changesets (limited by revs) from the current
-    repository to remote. Return an integer:
-      - None means nothing to push
-      - 0 means HTTP error
-      - 1 means we pushed and remote head count is unchanged *or*
-        we have outgoing changesets but refused to push
-      - other values as described by addchangegroup()
-    '''
-    # there are two ways to push to remote repo:
-    #
-    # addchangegroup assumes local user can lock remote
-    # repo (local filesystem, old ssh servers).
-    #
-    # unbundle assumes local user cannot lock remote repo (new ssh
-    # servers, http servers).
+class pushoperation(object):
+    """A object that represent a single push operation"""
 
-    if not remote.canpush():
-        raise util.Abort(_("destination does not support push"))
-    unfi = repo.unfiltered()
-    # get local lock as we might write phase data
-    locallock = repo.lock()
-    try:
-        repo.checkpush(force, revs)
-        lock = None
-        unbundle = remote.capable('unbundle')
-        if not unbundle:
-            lock = remote.lock()
+    def perform(self, repo, remote, force=False, revs=None, newbranch=False):
+        '''Push outgoing changesets (limited by revs) from the current
+        repository to remote. Return an integer:
+          - None means nothing to push
+          - 0 means HTTP error
+          - 1 means we pushed and remote head count is unchanged *or*
+            we have outgoing changesets but refused to push
+          - other values as described by addchangegroup()
+        '''
+        # there are two ways to push to remote repo:
+        #
+        # addchangegroup assumes local user can lock remote
+        # repo (local filesystem, old ssh servers).
+        #
+        # unbundle assumes local user cannot lock remote repo (new ssh
+        # servers, http servers).
+
+        if not remote.canpush():
+            raise util.Abort(_("destination does not support push"))
+        unfi = repo.unfiltered()
+        # get local lock as we might write phase data
+        locallock = repo.lock()
         try:
-            # discovery
-            fci = discovery.findcommonincoming
-            commoninc = fci(unfi, remote, force=force)
-            common, inc, remoteheads = commoninc
-            fco = discovery.findcommonoutgoing
-            outgoing = fco(unfi, remote, onlyheads=revs,
-                           commoninc=commoninc, force=force)
+            repo.checkpush(force, revs)
+            lock = None
+            unbundle = remote.capable('unbundle')
+            if not unbundle:
+                lock = remote.lock()
+            try:
+                # discovery
+                fci = discovery.findcommonincoming
+                commoninc = fci(unfi, remote, force=force)
+                common, inc, remoteheads = commoninc
+                fco = discovery.findcommonoutgoing
+                outgoing = fco(unfi, remote, onlyheads=revs,
+                               commoninc=commoninc, force=force)
 
-            if not outgoing.missing:
-                # nothing to push
-                scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
-                ret = None
-            else:
-                # something to push
-                if not force:
-                    # if self.obsstore == False --> no obsolete
-                    # then, save the iteration
-                    if unfi.obsstore:
-                        # this message are here for 80 char limit reason
-                        mso = _("push includes obsolete changeset: %s!")
-                        mst = "push includes %s changeset: %s!"
-                        # plain versions for i18n tool to detect them
-                        _("push includes unstable changeset: %s!")
-                        _("push includes bumped changeset: %s!")
-                        _("push includes divergent changeset: %s!")
-                        # If we are to push if there is at least one
-                        # obsolete or unstable changeset in missing, at
-                        # least one of the missinghead will be obsolete or
-                        # unstable. So checking heads only is ok
-                        for node in outgoing.missingheads:
-                            ctx = unfi[node]
-                            if ctx.obsolete():
-                                raise util.Abort(mso % ctx)
-                            elif ctx.troubled():
-                                raise util.Abort(_(mst)
-                                                 % (ctx.troubles()[0],
-                                                    ctx))
-                    discovery.checkheads(unfi, remote, outgoing,
-                                         remoteheads, newbranch,
-                                         bool(inc))
+                if not outgoing.missing:
+                    # nothing to push
+                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
+                    ret = None
+                else:
+                    # something to push
+                    if not force:
+                        # if self.obsstore == False --> no obsolete
+                        # then, save the iteration
+                        if unfi.obsstore:
+                            # this message are here for 80 char limit reason
+                            mso = _("push includes obsolete changeset: %s!")
+                            mst = "push includes %s changeset: %s!"
+                            # plain versions for i18n tool to detect them
+                            _("push includes unstable changeset: %s!")
+                            _("push includes bumped changeset: %s!")
+                            _("push includes divergent changeset: %s!")
+                            # If we are to push if there is at least one
+                            # obsolete or unstable changeset in missing, at
+                            # least one of the missinghead will be obsolete or
+                            # unstable. So checking heads only is ok
+                            for node in outgoing.missingheads:
+                                ctx = unfi[node]
+                                if ctx.obsolete():
+                                    raise util.Abort(mso % ctx)
+                                elif ctx.troubled():
+                                    raise util.Abort(_(mst)
+                                                     % (ctx.troubles()[0],
+                                                        ctx))
+                        discovery.checkheads(unfi, remote, outgoing,
+                                             remoteheads, newbranch,
+                                             bool(inc))
 
-                # create a changegroup from local
-                if revs is None and not outgoing.excluded:
-                    # push everything,
-                    # use the fast path, no race possible on push
-                    cg = repo._changegroup(outgoing.missing, 'push')
+                    # create a changegroup from local
+                    if revs is None and not outgoing.excluded:
+                        # push everything,
+                        # use the fast path, no race possible on push
+                        cg = repo._changegroup(outgoing.missing, 'push')
+                    else:
+                        cg = repo.getlocalbundle('push', outgoing)
+
+                    # apply changegroup to remote
+                    if unbundle:
+                        # local repo finds heads on server, finds out what
+                        # revs it must push. once revs transferred, if server
+                        # finds it has different heads (someone else won
+                        # commit/push race), server aborts.
+                        if force:
+                            remoteheads = ['force']
+                        # ssh: return remote's addchangegroup()
+                        # http: return remote's addchangegroup() or 0 for error
+                        ret = remote.unbundle(cg, remoteheads, 'push')
+                    else:
+                        # we return an integer indicating remote head count
+                        # change
+                        ret = remote.addchangegroup(cg, 'push', repo.url())
+
+                if ret:
+                    # push succeed, synchronize target of the push
+                    cheads = outgoing.missingheads
+                elif revs is None:
+                    # All out push fails. synchronize all common
+                    cheads = outgoing.commonheads
                 else:
-                    cg = repo.getlocalbundle('push', outgoing)
+                    # I want cheads = heads(::missingheads and ::commonheads)
+                    # (missingheads is revs with secret changeset filtered out)
+                    #
+                    # This can be expressed as:
+                    #     cheads = ( (missingheads and ::commonheads)
+                    #              + (commonheads and ::missingheads))"
+                    #              )
+                    #
+                    # while trying to push we already computed the following:
+                    #     common = (::commonheads)
+                    #     missing = ((commonheads::missingheads) - commonheads)
+                    #
+                    # We can pick:
+                    # * missingheads part of common (::commonheads)
+                    common = set(outgoing.common)
+                    cheads = [node for node in revs if node in common]
+                    # and
+                    # * commonheads parents on missing
+                    revset = unfi.set('%ln and parents(roots(%ln))',
+                                     outgoing.commonheads,
+                                     outgoing.missing)
+                    cheads.extend(c.node() for c in revset)
+                # even when we don't push, exchanging phase data is useful
+                remotephases = remote.listkeys('phases')
+                if (repo.ui.configbool('ui', '_usedassubrepo', False)
+                    and remotephases    # server supports phases
+                    and ret is None # nothing was pushed
+                    and remotephases.get('publishing', False)):
+                    # When:
+                    # - this is a subrepo push
+                    # - and remote support phase
+                    # - and no changeset was pushed
+                    # - and remote is publishing
+                    # We may be in issue 3871 case!
+                    # We drop the possible phase synchronisation done by
+                    # courtesy to publish changesets possibly locally draft
+                    # on the remote.
+                    remotephases = {'publishing': 'True'}
+                if not remotephases: # old server or public only repo
+                    phases.advanceboundary(repo, phases.public, cheads)
+                    # don't push any phase data as there is nothing to push
+                else:
+                    ana = phases.analyzeremotephases(repo, cheads, remotephases)
+                    pheads, droots = ana
+                    ### Apply remote phase on local
+                    if remotephases.get('publishing', False):
+                        phases.advanceboundary(repo, phases.public, cheads)
+                    else: # publish = False
+                        phases.advanceboundary(repo, phases.public, pheads)
+                        phases.advanceboundary(repo, phases.draft, cheads)
+                    ### Apply local phase on remote
 
-                # apply changegroup to remote
-                if unbundle:
-                    # local repo finds heads on server, finds out what
-                    # revs it must push. once revs transferred, if server
-                    # finds it has different heads (someone else won
-                    # commit/push race), server aborts.
-                    if force:
-                        remoteheads = ['force']
-                    # ssh: return remote's addchangegroup()
-                    # http: return remote's addchangegroup() or 0 for error
-                    ret = remote.unbundle(cg, remoteheads, 'push')
-                else:
-                    # we return an integer indicating remote head count
-                    # change
-                    ret = remote.addchangegroup(cg, 'push', repo.url())
+                    # Get the list of all revs draft on remote by public here.
+                    # XXX Beware that revset break if droots is not strictly
+                    # XXX root we may want to ensure it is but it is costly
+                    outdated =  unfi.set('heads((%ln::%ln) and public())',
+                                         droots, cheads)
+                    for newremotehead in outdated:
+                        r = remote.pushkey('phases',
+                                           newremotehead.hex(),
+                                           str(phases.draft),
+                                           str(phases.public))
+                        if not r:
+                            repo.ui.warn(_('updating %s to public failed!\n')
+                                            % newremotehead)
+                repo.ui.debug('try to push obsolete markers to remote\n')
+                if (obsolete._enabled and repo.obsstore and
+                    'obsolete' in remote.listkeys('namespaces')):
+                    rslts = []
+                    remotedata = repo.listkeys('obsolete')
+                    for key in sorted(remotedata, reverse=True):
+                        # reverse sort to ensure we end with dump0
+                        data = remotedata[key]
+                        rslts.append(remote.pushkey('obsolete', key, '', data))
+                    if [r for r in rslts if not r]:
+                        msg = _('failed to push some obsolete markers!\n')
+                        repo.ui.warn(msg)
+            finally:
+                if lock is not None:
+                    lock.release()
+        finally:
+            locallock.release()
 
-            if ret:
-                # push succeed, synchronize target of the push
-                cheads = outgoing.missingheads
-            elif revs is None:
-                # All out push fails. synchronize all common
-                cheads = outgoing.commonheads
-            else:
-                # I want cheads = heads(::missingheads and ::commonheads)
-                # (missingheads is revs with secret changeset filtered out)
-                #
-                # This can be expressed as:
-                #     cheads = ( (missingheads and ::commonheads)
-                #              + (commonheads and ::missingheads))"
-                #              )
-                #
-                # while trying to push we already computed the following:
-                #     common = (::commonheads)
-                #     missing = ((commonheads::missingheads) - commonheads)
-                #
-                # We can pick:
-                # * missingheads part of common (::commonheads)
-                common = set(outgoing.common)
-                cheads = [node for node in revs if node in common]
-                # and
-                # * commonheads parents on missing
-                revset = unfi.set('%ln and parents(roots(%ln))',
-                                 outgoing.commonheads,
-                                 outgoing.missing)
-                cheads.extend(c.node() for c in revset)
-            # even when we don't push, exchanging phase data is useful
-            remotephases = remote.listkeys('phases')
-            if (repo.ui.configbool('ui', '_usedassubrepo', False)
-                and remotephases    # server supports phases
-                and ret is None # nothing was pushed
-                and remotephases.get('publishing', False)):
-                # When:
-                # - this is a subrepo push
-                # - and remote support phase
-                # - and no changeset was pushed
-                # - and remote is publishing
-                # We may be in issue 3871 case!
-                # We drop the possible phase synchronisation done by
-                # courtesy to publish changesets possibly locally draft
-                # on the remote.
-                remotephases = {'publishing': 'True'}
-            if not remotephases: # old server or public only repo
-                phases.advanceboundary(repo, phases.public, cheads)
-                # don't push any phase data as there is nothing to push
-            else:
-                ana = phases.analyzeremotephases(repo, cheads, remotephases)
-                pheads, droots = ana
-                ### Apply remote phase on local
-                if remotephases.get('publishing', False):
-                    phases.advanceboundary(repo, phases.public, cheads)
-                else: # publish = False
-                    phases.advanceboundary(repo, phases.public, pheads)
-                    phases.advanceboundary(repo, phases.draft, cheads)
-                ### Apply local phase on remote
+        repo.ui.debug("checking for updated bookmarks\n")
+        rb = remote.listkeys('bookmarks')
+        for k in rb.keys():
+            if k in unfi._bookmarks:
+                nr, nl = rb[k], hex(repo._bookmarks[k])
+                if nr in unfi:
+                    cr = unfi[nr]
+                    cl = unfi[nl]
+                    if bookmarks.validdest(unfi, cr, cl):
+                        r = remote.pushkey('bookmarks', k, nr, nl)
+                        if r:
+                            repo.ui.status(_("updating bookmark %s\n") % k)
+                        else:
+                            repo.ui.warn(_('updating bookmark %s'
+                                           ' failed!\n') % k)
 
-                # Get the list of all revs draft on remote by public here.
-                # XXX Beware that revset break if droots is not strictly
-                # XXX root we may want to ensure it is but it is costly
-                outdated =  unfi.set('heads((%ln::%ln) and public())',
-                                     droots, cheads)
-                for newremotehead in outdated:
-                    r = remote.pushkey('phases',
-                                       newremotehead.hex(),
-                                       str(phases.draft),
-                                       str(phases.public))
-                    if not r:
-                        repo.ui.warn(_('updating %s to public failed!\n')
-                                        % newremotehead)
-            repo.ui.debug('try to push obsolete markers to remote\n')
-            if (obsolete._enabled and repo.obsstore and
-                'obsolete' in remote.listkeys('namespaces')):
-                rslts = []
-                remotedata = repo.listkeys('obsolete')
-                for key in sorted(remotedata, reverse=True):
-                    # reverse sort to ensure we end with dump0
-                    data = remotedata[key]
-                    rslts.append(remote.pushkey('obsolete', key, '', data))
-                if [r for r in rslts if not r]:
-                    msg = _('failed to push some obsolete markers!\n')
-                    repo.ui.warn(msg)
-        finally:
-            if lock is not None:
-                lock.release()
-    finally:
-        locallock.release()
-
-    repo.ui.debug("checking for updated bookmarks\n")
-    rb = remote.listkeys('bookmarks')
-    for k in rb.keys():
-        if k in unfi._bookmarks:
-            nr, nl = rb[k], hex(repo._bookmarks[k])
-            if nr in unfi:
-                cr = unfi[nr]
-                cl = unfi[nl]
-                if bookmarks.validdest(unfi, cr, cl):
-                    r = remote.pushkey('bookmarks', k, nr, nl)
-                    if r:
-                        repo.ui.status(_("updating bookmark %s\n") % k)
-                    else:
-                        repo.ui.warn(_('updating bookmark %s'
-                                       ' failed!\n') % k)
-
-    return ret
+        return ret
 
 def pull(repo, remote, heads=None, force=False):
     # don't open transaction for nothing or you break future useful
     # rollback call
     tr = None
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1660,11 +1660,12 @@  class localrepository(object):
         command.
         """
         pass
 
     def push(self, remote, force=False, revs=None, newbranch=False):
-        return exchangeutil.push(self, remote, force, revs, newbranch)
+        push = exchangeutil.pushoperation()
+        return push.perform(self, remote, force, revs, newbranch)
 
     def changegroupinfo(self, nodes, source):
         if self.ui.verbose or source == 'bundle':
             self.ui.status(_("%d changesets found\n") % len(nodes))
         if self.ui.debugflag: