changeset 116:e484cd5ec282

Only use lazy indexing for big indices; this avoids the overhead of the lazy index in the small-index case.
author mpm@selenic.com
date Fri, 20 May 2005 17:35:20 -0800
parents 39b438eeb25a
children 2ac722ad1a9d
files mercurial/revlog.py
diffstat 1 files changed, 24 insertions(+), 4 deletions(-)
--- a/mercurial/revlog.py	Fri May 20 17:34:04 2005 -0800
+++ b/mercurial/revlog.py	Fri May 20 17:35:20 2005 -0800
@@ -124,14 +124,34 @@
         self.datafile = datafile
         self.opener = opener
         self.cache = None
-        # read the whole index for now, handle on-demand later
+
         try:
             i = self.opener(self.indexfile).read()
         except IOError:
             i = ""
-        parser = lazyparser(i)
-        self.index = lazyindex(parser)
-        self.nodemap = lazymap(parser)
+
+        if len(i) > 10000:
+            # big index, let's parse it on demand
+            parser = lazyparser(i)
+            self.index = lazyindex(parser)
+            self.nodemap = lazymap(parser)
+        else:
+            s = struct.calcsize(indexformat)
+            l = len(i) / s
+            self.index = [None] * l
+            m = [None] * l
+
+            n = 0
+            for f in xrange(0, len(i), s):
+                # offset, size, base, linkrev, p1, p2, nodeid
+                e = struct.unpack(indexformat, i[f:f + s])
+                m[n] = (e[6], n)
+                self.index[n] = e
+                n += 1
+
+            self.nodemap = dict(m)
+            self.nodemap[nullid] = -1
+
 
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
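
The hunk above replaces the unconditional lazy parse with a size check: an index larger than 10000 bytes still goes through lazyparser/lazyindex/lazymap, while a small one is unpacked eagerly into a plain list plus a nodeid-to-revision dict. The following is a minimal, self-contained sketch of that eager path in modern Python (the original is Python 2); the INDEXFORMAT constant, NULLID value, and parse_index helper are stand-ins of my own and may not match the exact indexformat record layout used by revlog.py.

import struct

# Hypothetical record layout standing in for revlog's indexformat:
# offset, size, base, linkrev as big-endian 32-bit ints, then p1, p2, nodeid
# as raw 20-byte hashes.  This is an assumption for illustration only.
INDEXFORMAT = ">4l20s20s20s"
NULLID = b"\0" * 20

def parse_index(data):
    # Unpack every fixed-size record in `data` and build (index, nodemap).
    s = struct.calcsize(INDEXFORMAT)
    index = []
    nodemap = {NULLID: -1}           # the null node always maps to revision -1
    for rev, f in enumerate(range(0, len(data), s)):
        e = struct.unpack(INDEXFORMAT, data[f:f + s])
        index.append(e)
        nodemap[e[6]] = rev          # e[6] is the nodeid, keyed to its revision
    return index, nodemap

# Usage with one fabricated record: revision 0 with two null parents.
record = struct.pack(INDEXFORMAT, 0, 11, 0, 0, NULLID, NULLID, b"\x01" * 20)
index, nodemap = parse_index(record)
print(len(index), nodemap[b"\x01" * 20])   # -> 1 0

Building the dict in one pass mirrors what lazymap otherwise computes on demand, and seeding it with the null node at revision -1 matches the nodemap[nullid] = -1 line in the diff; the 10000-byte cutoff simply bounds how much data that one pass is allowed to touch.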