#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# Copyright 2008, 2011, Richard Lowe
#


'''
Workspace backup

Backup format is:
   backupdir/
      wsname/
         generation#/
            dirstate (handled by CdmUncommittedBackup)
                File containing dirstate nodeid (the changeset we need
                to update the workspace to after applying the bundle).
                This is the node to which the working copy changes
                (see 'diff', below) will be applied if applicable.

            bundle (handled by CdmCommittedBackup)
                An Hg bundle containing outgoing committed changes.

            nodes (handled by CdmCommittedBackup)
                A text file listing the full (hex) nodeid of all nodes in
                bundle, used by need_backup.

            diff (handled by CdmUncommittedBackup)
                A Git-formatted diff containing uncommitted changes.

            renames (handled by CdmUncommittedBackup)
                A list of renames in the working copy that have to be
                applied manually, rather than by the diff.

            metadata.tar.gz (handled by CdmMetadataBackup)
                $CODEMGR_WS/.hg/hgrc
                $CODEMGR_WS/.hg/localtags
                $CODEMGR_WS/.hg/patches (Mq data)

            clear.tar.gz (handled by CdmClearBackup)
                <short node>/
                    copies of each modified or added file, as it is in
                    this head.

                 ... for each outgoing head

                working/
                    copies of each modified or added file in the
                    working copy if any.

         latest -> generation#
            Newest backup generation.

All files in a given backup generation, with the exception of
dirstate, are optional.
'''

import grp, os, pwd, shutil, tarfile, time, traceback
from cStringIO import StringIO

# NOTE(review): internal Mercurial (1.x-era) APIs; behavior here is tied
# to that version of the changegroup/patch/util interfaces.
from mercurial import changegroup, error, node, patch, util


class CdmNodeMissing(util.Abort):
    '''a required node is not present in the destination workspace.

    This may occur both in the case where the bundle contains a
    changeset which is a child of a node not present in the
    destination workspace (because the destination workspace is not as
    up-to-date as the source), or because the source and destination
    workspace are not related.

    It may also happen in cases where the uncommitted changes need to
    be applied onto a node that the workspace does not possess even
    after application of the bundle (on a branch not present
    in the bundle or destination workspace, for instance)'''

    def __init__(self, msg, name):
        #
        # If e.name is a string 20 characters long, it is
        # assumed to be a node.  (Mercurial makes this
        # same assumption, when creating a LookupError)
        #
        if isinstance(name, str) and len(name) == 20:
            n = node.short(name)
        else:
            n = name

        util.Abort.__init__(self, "%s: changeset '%s' is missing\n"
                            "Your workspace is either not "
                            "sufficiently up to date,\n"
                            "or is unrelated to the workspace from "
                            "which the backup was taken.\n" % (msg, n))


class CdmTarFile(tarfile.TarFile):
    '''Tar file access + simple comparison to the filesystem, and
    creation/addition of files from Mercurial filectx objects.'''

    def __init__(self, *args, **kwargs):
        tarfile.TarFile.__init__(self, *args, **kwargs)
        # Raise for any member error rather than writing partial data.
        self.errorlevel = 2

    def members_match_fs(self, rootpath):
        '''Compare the contents of the tar archive to the directory
        specified by rootpath.  Return False if they differ.

        Every file in the archive must match the equivalent file in
        the filesystem.

        The existence, modification time, and size of each file are
        compared, content is not.'''

        def _member_matches_fs(member, rootpath):
            '''Compare a single member to its filesystem counterpart'''
            fpath = os.path.join(rootpath, member.name)

            if not os.path.exists(fpath):
                return False
            elif ((os.path.isfile(fpath) != member.isfile()) or
                  (os.path.isdir(fpath) != member.isdir()) or
                  (os.path.islink(fpath) != member.issym())):
                return False

            #
            # The filesystem may return a modification time with a
            # fractional component (as a float), whereas the tar format
            # only stores it to the whole second, perform the comparison
            # using integers (truncated, not rounded)
            #
            elif member.mtime != int(os.path.getmtime(fpath)):
                return False
            elif not member.isdir() and member.size != os.path.getsize(fpath):
                return False
            else:
                return True

        for elt in self:
            if not _member_matches_fs(elt, rootpath):
                return False

        return True

    def addfilectx(self, filectx, path=None):
        '''Add a filectx object to the archive.

        Use the path specified by the filectx object or, if specified,
        the PATH argument.

        The size, modification time, type and permissions of the tar
        member are taken from the filectx object, user and group id
        are those of the invoking user, user and group name are those
        of the invoking user if information is available, or "unknown"
        if it is not.
        '''

        t = tarfile.TarInfo(path or filectx.path())
        t.size = filectx.size()
        t.mtime = filectx.date()[0]
        t.uid = os.getuid()
        t.gid = os.getgid()

        try:
            t.uname = pwd.getpwuid(t.uid).pw_name
        except KeyError:
            t.uname = "unknown"

        try:
            t.gname = grp.getgrgid(t.gid).gr_name
        except KeyError:
            t.gname = "unknown"

        #
        # Mercurial versions symlinks by setting a flag and storing
        # the destination path in place of the file content.  The
        # actual contents (in the tar), should be empty.
        #
        if 'l' in filectx.flags():
            t.type = tarfile.SYMTYPE
            t.mode = 0777
            t.linkname = filectx.data()
            data = None
        else:
            t.type = tarfile.REGTYPE
            t.mode = 'x' in filectx.flags() and 0755 or 0644
            data = StringIO(filectx.data())

        self.addfile(t, data)


class CdmCommittedBackup(object):
    '''Backup of committed changes'''

    def __init__(self, backup, ws):
        self.ws = ws
        self.bu = backup
        # Names of the backup files this module owns (see cleanup()).
        self.files = ('bundle', 'nodes')

    def _outgoing_nodes(self, parent):
        '''Return a list of all outgoing nodes in hex format'''

        if parent:
            outgoing = self.ws.findoutgoing(parent)
            nodes = self.ws.repo.changelog.nodesbetween(outgoing)[0]
            return map(node.hex, nodes)
        else:
            return []

    def backup(self):
        '''Backup committed changes'''
        parent = self.ws.parent()

        if not parent:
            self.ws.ui.warn('Workspace has no parent, committed changes will '
                            'not be backed up\n')
            return

        out = self.ws.findoutgoing(parent)
        if not out:
            return

        # Write all outgoing changesets as a bzip2-compressed Hg bundle.
        cg = self.ws.repo.changegroup(out, 'bundle')
        changegroup.writebundle(cg, self.bu.backupfile('bundle'), 'HG10BZ')

        outnodes = self._outgoing_nodes(parent)
        if not outnodes:
            return

        # Record the hex nodeids of the bundled changesets; need_backup()
        # later compares this list against the current outgoing set.
        fp = None
        try:
            try:
                fp = self.bu.open('nodes', 'w')
                fp.write('%s\n' % '\n'.join(outnodes))
            except EnvironmentError, e:
                raise util.Abort("couldn't store outgoing nodes: %s" % e)
        finally:
            if fp and not fp.closed:
                fp.close()

    def restore(self):
        '''Restore committed changes from backup'''

        if not self.bu.exists('bundle'):
            return

        bpath = self.bu.backupfile('bundle')
        f = None
        try:
            try:
                f = self.bu.open('bundle')
                bundle = changegroup.readbundle(f, bpath)
                self.ws.repo.addchangegroup(bundle, 'strip',
                                            'bundle:%s' % bpath)
            except EnvironmentError, e:
                raise util.Abort("couldn't restore committed changes: %s\n"
                                 " %s" % (bpath, e))
            except error.LookupError, e:
                raise CdmNodeMissing("couldn't restore committed changes",
                                     e.name)
        finally:
            if f and not f.closed:
                f.close()

    def need_backup(self):
        '''Compare backup of committed changes to workspace'''

        if self.bu.exists('nodes'):
            f = None
            try:
                try:
                    f = self.bu.open('nodes')
                    bnodes = set(line.rstrip('\r\n') for line in f.readlines())
                    f.close()
                except EnvironmentError, e:
                    raise util.Abort("couldn't open backup node list: %s" % e)
            finally:
                if f and not f.closed:
                    f.close()
        else:
            bnodes = set()

        outnodes = set(self._outgoing_nodes(self.ws.parent()))

        #
        # If there are outgoing nodes not in the prior backup we need
        # to take a new backup; it's fine if there are nodes in the
        # old backup which are no longer outgoing, however.
        #
        if not outnodes <= bnodes:
            return True

        return False

    def cleanup(self):
        '''Remove backed up committed changes'''

        for f in self.files:
            self.bu.unlink(f)


class CdmUncommittedBackup(object):
    '''Backup of uncommitted changes'''

    def __init__(self, backup, ws):
        self.ws = ws
        self.bu = backup
        self.wctx = self.ws.workingctx(worklist=True)

    def _clobbering_renames(self):
        '''Return a list of pairs of files representing renames/copies
        that clobber already versioned files.  [(old-name new-name)...]
        '''

        #
        # Note that this doesn't handle uncommitted merges
        # as CdmUncommittedBackup itself doesn't.
        #
        parent = self.wctx.parents()[0]

        ret = []
        for fname in self.wctx.added() + self.wctx.modified():
            rn = self.wctx.filectx(fname).renamed()
            # Only renames whose destination already exists in the parent
            # need manual re-application; others round-trip via the diff.
            if rn and fname in parent:
                ret.append((rn[0], fname))
        return ret

    def backup(self):
        '''Backup uncommitted changes'''

        if self.ws.merged():
            raise util.Abort("Unable to backup an uncommitted merge.\n"
                             "Please complete your merge and commit")

        dirstate = node.hex(self.wctx.parents()[0].node())

        fp = None
        try:
            try:
                fp = self.bu.open('dirstate', 'w')
                fp.write(dirstate + '\n')
                fp.close()
            except EnvironmentError, e:
                raise util.Abort("couldn't save working copy parent: %s" % e)

            try:
                fp = self.bu.open('renames', 'w')
                for cons in self._clobbering_renames():
                    fp.write("%s %s\n" % cons)
                fp.close()
            except EnvironmentError, e:
                raise util.Abort("couldn't save clobbering copies: %s" % e)

            try:
                fp = self.bu.open('diff', 'w')
                match = self.ws.matcher(files=self.wctx.files())
                fp.write(self.ws.diff(opts={'git': True}, match=match))
            except EnvironmentError, e:
                raise util.Abort("couldn't save working copy diff: %s" % e)
        finally:
            if fp and not fp.closed:
                fp.close()

    def _dirstate(self):
        '''Return the desired working copy node from the backup'''
        fp = None
        try:
            try:
                fp = self.bu.open('dirstate')
                dirstate = fp.readline().strip()
            except EnvironmentError, e:
                raise util.Abort("couldn't read saved parent: %s" % e)
        finally:
            if fp and not fp.closed:
                fp.close()

        return dirstate

    def restore(self):
        '''Restore uncommitted changes'''
        dirstate = self._dirstate()

        #
        # Check that the patch's parent changeset exists.
        #
        try:
            n = node.bin(dirstate)
            self.ws.repo.changelog.lookup(n)
        except error.LookupError, e:
            raise CdmNodeMissing("couldn't restore uncommitted changes",
                                 e.name)

        try:
            self.ws.clean(rev=dirstate)
        except util.Abort, e:
            raise util.Abort("couldn't update to saved node: %s" % e)

        if not self.bu.exists('diff'):
            return

        #
        # There's a race here whereby if the patch (or part thereof)
        # is applied within the same second as the clean above (such
        # that modification time doesn't change) and if the size of
        # that file does not change, Hg may not see the change.
        #
        # We sleep a full second to avoid this, as sleeping merely
        # until the next second begins would require very close clock
        # synchronization on network filesystems.
        #
        time.sleep(1)

        files = {}
        try:
            diff = self.bu.backupfile('diff')
            try:
                fuzz = patch.patch(diff, self.ws.ui, strip=1,
                                   cwd=self.ws.repo.root, files=files)
                if fuzz:
                    raise util.Abort('working copy diff applied with fuzz')
            except Exception, e:
                raise util.Abort("couldn't apply working copy diff: %s\n"
                                 " %s" % (diff, e))
        finally:
            # Tell Hg about files the patch added/removed, even on failure.
            patch.updatedir(self.ws.ui, self.ws.repo, files)

        if not self.bu.exists('renames'):
            return

        #
        # We need to re-apply name changes where the new name
        # (rename/copy destination) is an already versioned file, as
        # Hg would otherwise ignore them.
        #
        try:
            fp = self.bu.open('renames')
            for line in fp:
                source, dest = line.strip().split()
                self.ws.copy(source, dest)
        except EnvironmentError, e:
            raise util.Abort('unable to open renames file: %s' % e)
        except ValueError:
            raise util.Abort('corrupt renames file: %s' %
                             self.bu.backupfile('renames'))

    def need_backup(self):
        '''Compare backup of uncommitted changes to workspace'''
        cnode = self.wctx.parents()[0].node()
        if self._dirstate() != node.hex(cnode):
            return True

        fd = None
        match = self.ws.matcher(files=self.wctx.files())
        curdiff = self.ws.diff(opts={'git': True}, match=match)

        try:
            if self.bu.exists('diff'):
                try:
                    fd = self.bu.open('diff')
                    backdiff = fd.read()
                    fd.close()
                except EnvironmentError, e:
                    raise util.Abort("couldn't open backup diff %s\n"
                                     " %s" % (self.bu.backupfile('diff'), e))
            else:
                backdiff = ''

            if backdiff != curdiff:
                return True

            currrenamed = self._clobbering_renames()
            #
            # NOTE(review): bakrenamed defaults to None, not [].  If the
            # 'renames' file is absent and there are currently no
            # clobbering renames, [] != None reports a backup as needed.
            # Presumably acceptable, since backup() always writes
            # 'renames' -- its absence means an incomplete/older backup.
            #
            bakrenamed = None

            if self.bu.exists('renames'):
                try:
                    fd = self.bu.open('renames')
                    bakrenamed = [tuple(line.strip().split(' ')) for line in fd]
                    fd.close()
                except EnvironmentError, e:
                    raise util.Abort("couldn't open renames file %s: %s\n" %
                                     (self.bu.backupfile('renames'), e))

            if currrenamed != bakrenamed:
                return True
        finally:
            if fd and not fd.closed:
                fd.close()

        return False

    def cleanup(self):
        '''Remove backed up uncommitted changes'''

        for f in ('dirstate', 'diff', 'renames'):
            self.bu.unlink(f)


class CdmMetadataBackup(object):
    '''Backup of workspace metadata'''

    def __init__(self, backup, ws):
        self.bu = backup
        self.ws = ws
        # Repo-relative (.hg/) paths that constitute workspace metadata.
        self.files = ('hgrc', 'localtags', 'patches', 'cdm')

    def backup(self):
        '''Backup workspace metadata'''

        tarpath = self.bu.backupfile('metadata.tar.gz')

        #
        # Files is a list of tuples (name, path), where name is as in
        # self.files, and path is the absolute path.
        #
        # (Python 2 tuple-parameter lambda; removed in Python 3.)
        #
        files = filter(lambda (name, path): os.path.exists(path),
                       zip(self.files, map(self.ws.repo.join, self.files)))

        if not files:
            return

        try:
            tar = CdmTarFile.gzopen(tarpath, 'w')
        except (EnvironmentError, tarfile.TarError), e:
            raise util.Abort("couldn't open %s for writing: %s" %
                             (tarpath, e))

        try:
            for name, path in files:
                try:
                    tar.add(path, name)
                except (EnvironmentError, tarfile.TarError), e:
                    #
                    # tarfile.TarError doesn't include the tar member or file
                    # in question, so we have to do so ourselves.
                    #
                    if isinstance(e, tarfile.TarError):
                        errstr = "%s: %s" % (name, e)
                    else:
                        errstr = str(e)

                    raise util.Abort("couldn't backup metadata to %s:\n"
                                     " %s" % (tarpath, errstr))
        finally:
            tar.close()

    def old_restore(self):
        '''Restore workspace metadata from a pre-tar backup'''

        for fname in self.files:
            if self.bu.exists(fname):
                bfile = self.bu.backupfile(fname)
                wfile = self.ws.repo.join(fname)

                try:
                    shutil.copy2(bfile, wfile)
                except EnvironmentError, e:
                    raise util.Abort("couldn't restore metadata from %s:\n"
                                     " %s" % (bfile, e))

    def tar_restore(self):
        '''Restore workspace metadata (from a tar-style backup)'''

        if not self.bu.exists('metadata.tar.gz'):
            return

        tarpath = self.bu.backupfile('metadata.tar.gz')

        try:
            tar = CdmTarFile.gzopen(tarpath)
        except (EnvironmentError, tarfile.TarError), e:
            raise util.Abort("couldn't open %s: %s" % (tarpath, e))

        try:
            for elt in tar:
                try:
                    tar.extract(elt, path=self.ws.repo.path)
                except (EnvironmentError, tarfile.TarError), e:
                    # Make sure the member name is in the exception message.
                    if isinstance(e, tarfile.TarError):
                        errstr = "%s: %s" % (elt.name, e)
                    else:
                        errstr = str(e)

                    raise util.Abort("couldn't restore metadata from %s:\n"
                                     " %s" %
                                     (tarpath, errstr))
        finally:
            if tar and not tar.closed:
                tar.close()

    def restore(self):
        '''Restore workspace metadata'''

        # A per-file 'hgrc' in the backup directory marks a pre-tar backup.
        if self.bu.exists('hgrc'):
            self.old_restore()
        else:
            self.tar_restore()

    def _walk(self):
        '''Yield the repo-relative path to each file we operate on,
        including each file within any affected directory'''

        for elt in self.files:
            path = self.ws.repo.join(elt)

            if not os.path.exists(path):
                continue

            if os.path.isdir(path):
                for root, dirs, files in os.walk(path, topdown=True):
                    yield root

                    for f in files:
                        yield os.path.join(root, f)
            else:
                yield path

    def need_backup(self):
        '''Compare backed up workspace metadata to workspace'''

        def strip_trailing_pathsep(pathname):
            '''Remove a possible trailing path separator from PATHNAME'''
            return pathname.endswith('/') and pathname[:-1] or pathname

        if self.bu.exists('metadata.tar.gz'):
            tarpath = self.bu.backupfile('metadata.tar.gz')
            try:
                tar = CdmTarFile.gzopen(tarpath)
            except (EnvironmentError, tarfile.TarError), e:
                raise util.Abort("couldn't open metadata tarball: %s\n"
                                 " %s" % (tarpath, e))

            if not tar.members_match_fs(self.ws.repo.path):
                tar.close()
                return True

            tarnames = map(strip_trailing_pathsep, tar.getnames())
            tar.close()
        else:
            tarnames = []

        repopath = self.ws.repo.path
        if not repopath.endswith('/'):
            repopath += '/'

        # Any metadata file on disk but not in the tarball => new backup.
        for path in self._walk():
            if path.replace(repopath, '', 1) not in tarnames:
                return True

        return False

    def cleanup(self):
        '''Remove backed up workspace metadata'''
        self.bu.unlink('metadata.tar.gz')


class CdmClearBackup(object):
    '''A backup (in tar format) of complete source files from every
    workspace head.

    Paths in the tarball are prefixed by the revision and node of the
    head, or "working" for the working directory.

    This is done purely for the benefit of the user, and as such takes
    no part in restore or need_backup checking, restore always
    succeeds, need_backup always returns False
    '''

    def __init__(self, backup, ws):
        self.bu = backup
        self.ws = ws

    def _branch_pairs(self):
        '''Return a list of tuples (parenttip, localtip) for each
        outgoing head.  If the working copy contains modified files,
        it is a head, and neither of its parents are.
        '''

        parent = self.ws.parent()

        if parent:
            outgoing = self.ws.findoutgoing(parent)
            outnodes = set(self.ws.repo.changelog.nodesbetween(outgoing)[0])

            heads = [self.ws.repo.changectx(n) for n in self.ws.repo.heads()
                     if n in outnodes]
        else:
            heads = []
            outnodes = []

        wctx = self.ws.workingctx()
        if wctx.files():        # We only care about file changes.
            heads = filter(lambda x: x not in wctx.parents(), heads) + [wctx]

        pairs = []
        for head in heads:
            if head.rev() is None:
                # Working copy head: compare against its parents instead.
                c = head.parents()
            else:
                c = [head]

            pairs.append((self.ws.parenttip(c, outnodes), head))
        return pairs

    def backup(self):
        '''Save a clear copy of each source file modified between each
        head and that head's parenttip (see WorkSpace.parenttip).
        '''

        tarpath = self.bu.backupfile('clear.tar.gz')
        branches = self._branch_pairs()

        if not branches:
            return

        try:
            tar = CdmTarFile.gzopen(tarpath, 'w')
        except (EnvironmentError, tarfile.TarError), e:
            raise util.Abort("Could not open %s for writing: %s" %
                             (tarpath, e))

        try:
            for parent, child in branches:
                tpath = child.node() and node.short(child.node()) or "working"

                for fname, change in self.ws.status(parent, child).iteritems():
                    if change not in ('added', 'modified'):
                        continue

                    try:
                        tar.addfilectx(child.filectx(fname),
                                       os.path.join(tpath, fname))
                    except ValueError, e:
                        crev = child.rev()
                        if crev is None:
                            crev = "working copy"
                        raise util.Abort("Could not backup clear file %s "
                                         "from %s: %s\n" % (fname, crev, e))
        finally:
            tar.close()

    def cleanup(self):
        '''Cleanup a failed Clear backup.

        Remove the clear tarball from the backup directory.
        '''

        self.bu.unlink('clear.tar.gz')

    def restore(self):
        '''Clear backups are never restored, do nothing'''
        pass

    def need_backup(self):
        '''Clear backups are never compared, return False (no backup needed).

        Should a backup actually be needed, one of the other
        implementation classes would notice in any situation we would.
        '''

        return False


class CdmBackup(object):
    '''A backup of a given workspace'''

    def __init__(self, ui, ws, name):
        self.ws = ws
        self.ui = ui
        self.backupdir = self._find_backup_dir(name)

        #
        # The order of instances here controls the order the various operations
        # are run.
        #
        # There's some inherent dependence, in that on restore we need
        # to restore committed changes prior to uncommitted changes
        # (as the parent revision of any uncommitted changes is quite
        # likely to not exist until committed changes are restored).
        # Metadata restore can happen at any point, but happens last
        # as a matter of convention.
        #
        self.modules = [x(self, ws) for x in [CdmCommittedBackup,
                                              CdmUncommittedBackup,
                                              CdmClearBackup,
                                              CdmMetadataBackup]]

        if os.path.exists(os.path.join(self.backupdir, 'latest')):
            generation = os.readlink(os.path.join(self.backupdir, 'latest'))
            self.generation = int(os.path.split(generation)[1])
        else:
            self.generation = 0

    def _find_backup_dir(self, name):
        '''Find the path to an appropriate backup directory based on NAME'''

        if os.path.isabs(name):
            return name

        if self.ui.config('cdm', 'backupdir'):
            backupbase = os.path.expanduser(self.ui.config('cdm', 'backupdir'))
        else:
            home = None

            try:
                home = os.getenv('HOME') or pwd.getpwuid(os.getuid()).pw_dir
            except KeyError:
                pass                    # Handled anyway

            if not home:
                raise util.Abort('Could not determine your HOME directory to '
                                 'find backup path')

            backupbase = os.path.join(home, 'cdm.backup')

        backupdir = os.path.join(backupbase, name)

        # If backupdir exists, it must be a directory.
        if (os.path.exists(backupdir) and not os.path.isdir(backupdir)):
            raise util.Abort('%s exists but is not a directory' % backupdir)

        return backupdir

    def _update_latest(self, gen):
        '''Update latest symlink to point to the current generation'''
        linkpath = os.path.join(self.backupdir, 'latest')

        if os.path.lexists(linkpath):
            os.unlink(linkpath)

        os.symlink(str(gen), linkpath)

    def _create_gen(self, gen):
        '''Create a new backup generation'''
        try:
            os.makedirs(os.path.join(self.backupdir, str(gen)))
            self._update_latest(gen)
        except EnvironmentError, e:
            raise util.Abort("Couldn't create backup generation %s: %s" %
                             (os.path.join(self.backupdir, str(gen)), e))

    def backupfile(self, path):
        '''return full path to backup file FILE at GEN
        (GEN being the current generation, self.generation)'''
        return os.path.join(self.backupdir, str(self.generation), path)

    def unlink(self, name):
        '''Unlink the specified path from the backup directory.
        A no-op if the path does not exist.
        '''

        fpath = self.backupfile(name)
        if os.path.exists(fpath):
            os.unlink(fpath)

    def open(self, name, mode='r'):
        '''Open the specified file in the backup directory'''
        return open(self.backupfile(name), mode)

    def exists(self, name):
        '''Return boolean indicating whether a given file exists in the
        backup directory.'''
        return os.path.exists(self.backupfile(name))

    def need_backup(self):
        '''Compare backed up changes to workspace'''
        #
        # If there's no current backup generation, or the last backup was
        # invalid (lacking the dirstate file), we need a backup regardless
        # of anything else.
        #
        if not self.generation or not self.exists('dirstate'):
            return True

        for x in self.modules:
            if x.need_backup():
                return True

        return False

    def backup(self):
        '''Take a backup of the current workspace

        Calling code is expected to hold both the working copy lock
        and repository lock.'''

        if not os.path.exists(self.backupdir):
            try:
                os.makedirs(self.backupdir)
            except EnvironmentError, e:
                raise util.Abort('Could not create backup directory %s: %s' %
                                 (self.backupdir, e))

        self.generation += 1
        self._create_gen(self.generation)

        try:
            for x in self.modules:
                x.backup()
        except Exception, e:
            if isinstance(e, KeyboardInterrupt):
                self.ws.ui.warn("Interrupted\n")
            else:
                self.ws.ui.warn("Error: %s\n" % e)
                show_traceback = self.ws.ui.configbool('ui', 'traceback',
                                                       False)

                #
                # If it's not a 'normal' error, we want to print a stack
                # trace now in case the attempt to remove the partial
                # backup also fails, and raises a second exception.
                #
                if (not isinstance(e, (EnvironmentError, util.Abort))
                    or show_traceback):
                    traceback.print_exc()

            # Roll back the partial generation and re-point 'latest'.
            for x in self.modules:
                x.cleanup()

            os.rmdir(os.path.join(self.backupdir, str(self.generation)))
            self.generation -= 1

            if self.generation != 0:
                self._update_latest(self.generation)
            else:
                os.unlink(os.path.join(self.backupdir, 'latest'))

            raise util.Abort('Backup failed')

    def restore(self, gen=None):
        '''Restore workspace from backup

        Restores from backup generation GEN (defaulting to the latest)
        into workspace WS.

        Calling code is expected to hold both the working copy lock
        and repository lock of the destination workspace.'''

        if not os.path.exists(self.backupdir):
            raise util.Abort('Backup directory does not exist: %s' %
                             (self.backupdir))

        if gen:
            if not os.path.exists(os.path.join(self.backupdir, str(gen))):
                raise util.Abort('Backup generation does not exist: %s' %
                                 (os.path.join(self.backupdir, str(gen))))
            self.generation = int(gen)

        if not self.generation:         # This is OK, 0 is not a valid generation
            raise util.Abort('Backup has no generations: %s' % self.backupdir)

        if not self.exists('dirstate'):
            raise util.Abort('Backup %s/%s is incomplete (dirstate missing)' %
                             (self.backupdir, self.generation))

        try:
            for x in self.modules:
                x.restore()
        except util.Abort, e:
            raise util.Abort('Error restoring workspace:\n'
                             '%s\n'
                             'Workspace may be partially restored' % e)