new file mode 100644
@@ -0,0 +1,214 @@
+# exchangeutil.py - utility to exchange data between repositories.
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+from node import hex
+import util, scmutil
+import discovery, phases, obsolete, bookmarks
+
+
+def push(repo, remote, force=False, revs=None, newbranch=False):
+ '''Push outgoing changesets (limited by revs) from the current
+ repository to remote. Return an integer:
+ - None means nothing to push
+ - 0 means HTTP error
+ - 1 means we pushed and remote head count is unchanged *or*
+ we have outgoing changesets but refused to push
+ - other values as described by addchangegroup()
+ '''
+ # there are two ways to push to remote repo:
+ #
+ # addchangegroup assumes local user can lock remote
+ # repo (local filesystem, old ssh servers).
+ #
+ # unbundle assumes local user cannot lock remote repo (new ssh
+ # servers, http servers).
+
+ if not remote.canpush():
+ raise util.Abort(_("destination does not support push"))
+ unfi = repo.unfiltered()
+ # get local lock as we might write phase data
+ locallock = repo.lock()
+ try:
+ repo.checkpush(force, revs)
+ lock = None
+ unbundle = remote.capable('unbundle')
+ if not unbundle:
+ lock = remote.lock()
+ try:
+ # discovery
+ fci = discovery.findcommonincoming
+ commoninc = fci(unfi, remote, force=force)
+ common, inc, remoteheads = commoninc
+ fco = discovery.findcommonoutgoing
+ outgoing = fco(unfi, remote, onlyheads=revs,
+ commoninc=commoninc, force=force)
+
+ if not outgoing.missing:
+ # nothing to push
+ scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
+ ret = None
+ else:
+ # something to push
+ if not force:
+ # if self.obsstore == False --> no obsolete
+ # then, save the iteration
+ if unfi.obsstore:
+                        # these messages are defined here for the 80 char limit
+ mso = _("push includes obsolete changeset: %s!")
+ mst = "push includes %s changeset: %s!"
+ # plain versions for i18n tool to detect them
+ _("push includes unstable changeset: %s!")
+ _("push includes bumped changeset: %s!")
+ _("push includes divergent changeset: %s!")
+                        # If there is at least one obsolete or unstable
+                        # changeset in missing, then at least one of the
+                        # missingheads will be obsolete or unstable. So
+                        # checking heads only is ok
+ for node in outgoing.missingheads:
+ ctx = unfi[node]
+ if ctx.obsolete():
+ raise util.Abort(mso % ctx)
+ elif ctx.troubled():
+ raise util.Abort(_(mst)
+ % (ctx.troubles()[0],
+ ctx))
+ discovery.checkheads(unfi, remote, outgoing,
+ remoteheads, newbranch,
+ bool(inc))
+
+ # create a changegroup from local
+ if revs is None and not outgoing.excluded:
+ # push everything,
+ # use the fast path, no race possible on push
+ cg = repo._changegroup(outgoing.missing, 'push')
+ else:
+ cg = repo.getlocalbundle('push', outgoing)
+
+ # apply changegroup to remote
+ if unbundle:
+ # local repo finds heads on server, finds out what
+ # revs it must push. once revs transferred, if server
+ # finds it has different heads (someone else won
+ # commit/push race), server aborts.
+ if force:
+ remoteheads = ['force']
+ # ssh: return remote's addchangegroup()
+ # http: return remote's addchangegroup() or 0 for error
+ ret = remote.unbundle(cg, remoteheads, 'push')
+ else:
+ # we return an integer indicating remote head count
+ # change
+ ret = remote.addchangegroup(cg, 'push', repo.url())
+
+ if ret:
+                # push succeeded, synchronize the target of the push
+ cheads = outgoing.missingheads
+ elif revs is None:
+                # our entire push failed; synchronize all common
+ cheads = outgoing.commonheads
+ else:
+ # I want cheads = heads(::missingheads and ::commonheads)
+ # (missingheads is revs with secret changeset filtered out)
+ #
+ # This can be expressed as:
+ # cheads = ( (missingheads and ::commonheads)
+ # + (commonheads and ::missingheads))"
+ # )
+ #
+ # while trying to push we already computed the following:
+ # common = (::commonheads)
+ # missing = ((commonheads::missingheads) - commonheads)
+ #
+ # We can pick:
+ # * missingheads part of common (::commonheads)
+ common = set(outgoing.common)
+ cheads = [node for node in revs if node in common]
+ # and
+ # * commonheads parents on missing
+ revset = unfi.set('%ln and parents(roots(%ln))',
+ outgoing.commonheads,
+ outgoing.missing)
+ cheads.extend(c.node() for c in revset)
+ # even when we don't push, exchanging phase data is useful
+ remotephases = remote.listkeys('phases')
+ if (repo.ui.configbool('ui', '_usedassubrepo', False)
+ and remotephases # server supports phases
+ and ret is None # nothing was pushed
+ and remotephases.get('publishing', False)):
+ # When:
+ # - this is a subrepo push
+            # - and the remote supports phases
+ # - and no changeset was pushed
+ # - and remote is publishing
+ # We may be in issue 3871 case!
+ # We drop the possible phase synchronisation done by
+ # courtesy to publish changesets possibly locally draft
+ # on the remote.
+ remotephases = {'publishing': 'True'}
+ if not remotephases: # old server or public only repo
+ phases.advanceboundary(repo, phases.public, cheads)
+ # don't push any phase data as there is nothing to push
+ else:
+ ana = phases.analyzeremotephases(repo, cheads, remotephases)
+ pheads, droots = ana
+ ### Apply remote phase on local
+ if remotephases.get('publishing', False):
+ phases.advanceboundary(repo, phases.public, cheads)
+ else: # publish = False
+ phases.advanceboundary(repo, phases.public, pheads)
+ phases.advanceboundary(repo, phases.draft, cheads)
+ ### Apply local phase on remote
+
+ # Get the list of all revs draft on remote by public here.
+ # XXX Beware that revset break if droots is not strictly
+ # XXX root we may want to ensure it is but it is costly
+ outdated = unfi.set('heads((%ln::%ln) and public())',
+ droots, cheads)
+ for newremotehead in outdated:
+ r = remote.pushkey('phases',
+ newremotehead.hex(),
+ str(phases.draft),
+ str(phases.public))
+ if not r:
+ repo.ui.warn(_('updating %s to public failed!\n')
+ % newremotehead)
+ repo.ui.debug('try to push obsolete markers to remote\n')
+ if (obsolete._enabled and repo.obsstore and
+ 'obsolete' in remote.listkeys('namespaces')):
+ rslts = []
+ remotedata = repo.listkeys('obsolete')
+ for key in sorted(remotedata, reverse=True):
+ # reverse sort to ensure we end with dump0
+ data = remotedata[key]
+ rslts.append(remote.pushkey('obsolete', key, '', data))
+ if [r for r in rslts if not r]:
+ msg = _('failed to push some obsolete markers!\n')
+ repo.ui.warn(msg)
+ finally:
+ if lock is not None:
+ lock.release()
+ finally:
+ locallock.release()
+
+ repo.ui.debug("checking for updated bookmarks\n")
+ rb = remote.listkeys('bookmarks')
+ for k in rb.keys():
+ if k in unfi._bookmarks:
+ nr, nl = rb[k], hex(repo._bookmarks[k])
+ if nr in unfi:
+ cr = unfi[nr]
+ cl = unfi[nl]
+ if bookmarks.validdest(unfi, cr, cl):
+ r = remote.pushkey('bookmarks', k, nr, nl)
+ if r:
+ repo.ui.status(_("updating bookmark %s\n") % k)
+ else:
+ repo.ui.warn(_('updating bookmark %s'
+ ' failed!\n') % k)
+
+ return ret
@@ -6,11 +6,11 @@
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
-import lock, transaction, store, encoding, base85
+import lock, transaction, store, encoding, base85, exchangeutil
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
@@ -1743,211 +1743,11 @@ class localrepository(object):
command.
"""
pass
def push(self, remote, force=False, revs=None, newbranch=False):
- '''Push outgoing changesets (limited by revs) from the current
- repository to remote. Return an integer:
- - None means nothing to push
- - 0 means HTTP error
- - 1 means we pushed and remote head count is unchanged *or*
- we have outgoing changesets but refused to push
- - other values as described by addchangegroup()
- '''
- # there are two ways to push to remote repo:
- #
- # addchangegroup assumes local user can lock remote
- # repo (local filesystem, old ssh servers).
- #
- # unbundle assumes local user cannot lock remote repo (new ssh
- # servers, http servers).
-
- if not remote.canpush():
- raise util.Abort(_("destination does not support push"))
- unfi = self.unfiltered()
- # get local lock as we might write phase data
- locallock = self.lock()
- try:
- self.checkpush(force, revs)
- lock = None
- unbundle = remote.capable('unbundle')
- if not unbundle:
- lock = remote.lock()
- try:
- # discovery
- fci = discovery.findcommonincoming
- commoninc = fci(unfi, remote, force=force)
- common, inc, remoteheads = commoninc
- fco = discovery.findcommonoutgoing
- outgoing = fco(unfi, remote, onlyheads=revs,
- commoninc=commoninc, force=force)
-
-
- if not outgoing.missing:
- # nothing to push
- scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
- ret = None
- else:
- # something to push
- if not force:
- # if self.obsstore == False --> no obsolete
- # then, save the iteration
- if unfi.obsstore:
- # this message are here for 80 char limit reason
- mso = _("push includes obsolete changeset: %s!")
- mst = "push includes %s changeset: %s!"
- # plain versions for i18n tool to detect them
- _("push includes unstable changeset: %s!")
- _("push includes bumped changeset: %s!")
- _("push includes divergent changeset: %s!")
- # If we are to push if there is at least one
- # obsolete or unstable changeset in missing, at
- # least one of the missinghead will be obsolete or
- # unstable. So checking heads only is ok
- for node in outgoing.missingheads:
- ctx = unfi[node]
- if ctx.obsolete():
- raise util.Abort(mso % ctx)
- elif ctx.troubled():
- raise util.Abort(_(mst)
- % (ctx.troubles()[0],
- ctx))
- discovery.checkheads(unfi, remote, outgoing,
- remoteheads, newbranch,
- bool(inc))
-
- # create a changegroup from local
- if revs is None and not outgoing.excluded:
- # push everything,
- # use the fast path, no race possible on push
- cg = self._changegroup(outgoing.missing, 'push')
- else:
- cg = self.getlocalbundle('push', outgoing)
-
- # apply changegroup to remote
- if unbundle:
- # local repo finds heads on server, finds out what
- # revs it must push. once revs transferred, if server
- # finds it has different heads (someone else won
- # commit/push race), server aborts.
- if force:
- remoteheads = ['force']
- # ssh: return remote's addchangegroup()
- # http: return remote's addchangegroup() or 0 for error
- ret = remote.unbundle(cg, remoteheads, 'push')
- else:
- # we return an integer indicating remote head count
- # change
- ret = remote.addchangegroup(cg, 'push', self.url())
-
- if ret:
- # push succeed, synchronize target of the push
- cheads = outgoing.missingheads
- elif revs is None:
- # All out push fails. synchronize all common
- cheads = outgoing.commonheads
- else:
- # I want cheads = heads(::missingheads and ::commonheads)
- # (missingheads is revs with secret changeset filtered out)
- #
- # This can be expressed as:
- # cheads = ( (missingheads and ::commonheads)
- # + (commonheads and ::missingheads))"
- # )
- #
- # while trying to push we already computed the following:
- # common = (::commonheads)
- # missing = ((commonheads::missingheads) - commonheads)
- #
- # We can pick:
- # * missingheads part of common (::commonheads)
- common = set(outgoing.common)
- cheads = [node for node in revs if node in common]
- # and
- # * commonheads parents on missing
- revset = unfi.set('%ln and parents(roots(%ln))',
- outgoing.commonheads,
- outgoing.missing)
- cheads.extend(c.node() for c in revset)
- # even when we don't push, exchanging phase data is useful
- remotephases = remote.listkeys('phases')
- if (self.ui.configbool('ui', '_usedassubrepo', False)
- and remotephases # server supports phases
- and ret is None # nothing was pushed
- and remotephases.get('publishing', False)):
- # When:
- # - this is a subrepo push
- # - and remote support phase
- # - and no changeset was pushed
- # - and remote is publishing
- # We may be in issue 3871 case!
- # We drop the possible phase synchronisation done by
- # courtesy to publish changesets possibly locally draft
- # on the remote.
- remotephases = {'publishing': 'True'}
- if not remotephases: # old server or public only repo
- phases.advanceboundary(self, phases.public, cheads)
- # don't push any phase data as there is nothing to push
- else:
- ana = phases.analyzeremotephases(self, cheads, remotephases)
- pheads, droots = ana
- ### Apply remote phase on local
- if remotephases.get('publishing', False):
- phases.advanceboundary(self, phases.public, cheads)
- else: # publish = False
- phases.advanceboundary(self, phases.public, pheads)
- phases.advanceboundary(self, phases.draft, cheads)
- ### Apply local phase on remote
-
- # Get the list of all revs draft on remote by public here.
- # XXX Beware that revset break if droots is not strictly
- # XXX root we may want to ensure it is but it is costly
- outdated = unfi.set('heads((%ln::%ln) and public())',
- droots, cheads)
- for newremotehead in outdated:
- r = remote.pushkey('phases',
- newremotehead.hex(),
- str(phases.draft),
- str(phases.public))
- if not r:
- self.ui.warn(_('updating %s to public failed!\n')
- % newremotehead)
- self.ui.debug('try to push obsolete markers to remote\n')
- if (obsolete._enabled and self.obsstore and
- 'obsolete' in remote.listkeys('namespaces')):
- rslts = []
- remotedata = self.listkeys('obsolete')
- for key in sorted(remotedata, reverse=True):
- # reverse sort to ensure we end with dump0
- data = remotedata[key]
- rslts.append(remote.pushkey('obsolete', key, '', data))
- if [r for r in rslts if not r]:
- msg = _('failed to push some obsolete markers!\n')
- self.ui.warn(msg)
- finally:
- if lock is not None:
- lock.release()
- finally:
- locallock.release()
-
- self.ui.debug("checking for updated bookmarks\n")
- rb = remote.listkeys('bookmarks')
- for k in rb.keys():
- if k in unfi._bookmarks:
- nr, nl = rb[k], hex(self._bookmarks[k])
- if nr in unfi:
- cr = unfi[nr]
- cl = unfi[nl]
- if bookmarks.validdest(unfi, cr, cl):
- r = remote.pushkey('bookmarks', k, nr, nl)
- if r:
- self.ui.status(_("updating bookmark %s\n") % k)
- else:
- self.ui.warn(_('updating bookmark %s'
- ' failed!\n') % k)
-
- return ret
+ return exchangeutil.push(self, remote, force, revs, newbranch)
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
self.ui.status(_("%d changesets found\n") % len(nodes))
if self.ui.debugflag: