comparison mercurial/revlog.py @ 192:5d8553352d2e

Changes to network protocol

-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1

Changes to network protocol

Stream changes at the delta level rather than at whole delta groups
 this breaks the protocol - we now send a zero byte delta to indicate the
 end of a group rather than sending the entire group length up front
Fix filename length asymmetry while we're breaking things
Fix hidden O(n^2) bug in calculating changegroup
 list.append(e) is O(n), list + [element] is not
Decompress chunks on read in revlog.group()
Improve status messages
 report bytes transferred
 report nothing to do
Deal with /dev/null path brokenness
Remove untriggered patch assertion

manifest hash: 3eedcfe878561f9eb4adedb04f6be618fb8ae8d8
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.0 (GNU/Linux)

iD8DBQFCmzlqywK+sNU5EO8RAn0KAJ4z4toWSSGjLoZO6FKWLx/3QbZufACglQgd
S48bumc++DnuY1iPSNWKGAI=
=lCjx
-----END PGP SIGNATURE-----
author mpm@selenic.com
date Mon, 30 May 2005 08:03:54 -0800
parents 083c38bdfa64
children 0a37e9c8ad6c
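
The wire-format change described in the commit message replaces the up-front group length with per-chunk framing: group() in the diff below emits a 4-byte big-endian length (which counts itself), 80 bytes of metadata (node, p1, p2 and the linked changeset node) plus the delta, and finally a bare zero length to mark the end of the group. As a minimal illustrative sketch only (read_chunks and the file-like stream argument are hypothetical, not Mercurial code), a receiver could walk such a group like this:

import struct

def read_chunks(stream):
    # Each chunk: 4-byte big-endian length (counting these 4 bytes),
    # then 80 bytes of metadata (node, p1, p2, changeset link) followed
    # by the compressed delta.  A zero length ends the group.
    while True:
        l = struct.unpack(">l", stream.read(4))[0]
        if l == 0:
            return                     # "zero byte delta": end of group
        yield stream.read(l - 4)       # metadata + delta payload

Each payload yielded here corresponds to one chunk that the new addgroup() takes apart with struct.unpack("20s20s20s20s", chunk[:80]) and chunk[80:].
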
comparing 191:d7e859cf2f1b with 192:5d8553352d2e
@@ -242,12 +242,10 @@
         base = self.base(t)
         start = self.start(base)
         end = self.end(t)
         prev = self.revision(self.tip())
         d = self.diff(prev, text)
-        if self.patches(prev, [d]) != text:
-            raise AssertionError("diff failed")
         data = compress(d)
         dist = end - start + len(data)
 
         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
@@ -328,11 +326,13 @@
             if self.index[i][3] in linkmap:
                 revs.append(i)
                 needed[i] = 1
 
         # if we don't have any revisions touched by these changesets, bail
-        if not revs: return struct.pack(">l", 0)
+        if not revs:
+            yield struct.pack(">l", 0)
+            return
 
         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))
 
@@ -350,94 +350,89 @@
 
         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
+        oo = -1
+        ol = 0
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
-            spans.append((o, l, [(n, l)]))
-
-        # merge spans
-        merge = [spans.pop(0)]
-        while spans:
-            e = spans.pop(0)
-            f = merge[-1]
-            if e[0] == f[0] + f[1]:
-                merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
+            if oo + ol == o: # can we merge with the previous?
+                nl = spans[-1][2]
+                nl.append((n, l))
+                ol += l
+                spans[-1] = (oo, ol, nl)
             else:
-                merge.append(e)
+                oo = o
+                ol = l
+                spans.append((oo, ol, [(n, l)]))
 
         # read spans in, divide up chunks
         chunks = {}
-        for span in merge:
+        for span in spans:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])
 
             # divide up the span
             pos = 0
             for r, l in span[2]:
-                chunks[r] = data[pos: pos + l]
+                chunks[r] = decompress(data[pos: pos + l])
                 pos += l
 
         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
-            bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
+            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)
 
         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)
 
+            # do we need to construct a new delta?
             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
-                    ta = decompress(chunks[self.base(a)])
+                    ta = chunks[self.base(a)]
                     ta = construct(ta, base, a)
                 else:
                     ta = ""
 
                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
-                    tb = decompress(chunks[self.base(b)])
+                    tb = chunks[self.base(b)]
                 tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
-                d = decompress(chunks[b])
 
             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
-            deltas.append(l + meta + d)
-
-        l = struct.pack(">l", sum(map(len, deltas)) + 4)
-        deltas.insert(0, l)
-        return "".join(deltas)
+            yield l
+            yield meta
+            yield d
+
+        yield struct.pack(">l", 0)
 
-    def addgroup(self, data, linkmapper, transaction):
+    def addgroup(self, revs, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
         # log, the rest are against the previous delta.
 
-        if not data: return self.tip()
-
-        # retrieve the parent revision of the delta chain
-        chain = data[24:44]
-        if not chain in self.nodemap:
-            raise "unknown base %s" % short(chain[:4])
-
         # track the base of the current delta log
         r = self.count()
        t = r - 1
+        node = nullid
 
         base = prev = -1
         start = end = 0
         if r:
             start = self.start(self.base(t))
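
The rewritten loop above folds the old separate "merge spans" pass (which repeatedly called spans.pop(0), an O(n) operation) into span construction itself: adjacent (offset, length) reads are coalesced as soon as they are produced, while still recording which revision each slice belongs to. A toy sketch of the same idea, with a hypothetical merge_spans helper and made-up input, for illustration only:

def merge_spans(reads):
    # reads: (offset, length) pairs sorted by offset.  Returns
    # (offset, total_length, members) spans in which contiguous reads
    # have been coalesced, mirroring the oo/ol bookkeeping above.
    spans = []
    oo, ol = -1, 0                       # offset/length of the last span
    for n, (o, l) in enumerate(reads):
        if oo + ol == o:                 # contiguous with the previous span?
            members = spans[-1][2]
            members.append((n, l))
            ol += l
            spans[-1] = (oo, ol, members)
        else:
            oo, ol = o, l
            spans.append((oo, ol, [(n, l)]))
    return spans

# merge_spans([(0, 10), (10, 5), (30, 7)])
# -> [(0, 15, [(0, 10), (1, 5)]), (30, 7, [(2, 7)])]
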
@@ -450,19 +445,23 @@
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")
 
         # loop through our set of deltas
-        pos = 0
-        while pos < len(data):
-            l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
-                                                data[pos:pos+84])
+        chain = None
+        for chunk in revs:
+            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             link = linkmapper(cs)
             if node in self.nodemap:
                 raise "already have %s" % hex(node[:4])
-            delta = data[pos + 84:pos + l]
-            pos += l
+            delta = chunk[80:]
+
+            if not chain:
+                # retrieve the parent revision of the delta chain
+                chain = p1
+                if not chain in self.nodemap:
+                    raise "unknown base %s" % short(chain[:4])
 
             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the