add hg and python
This commit is contained in:
parent
3a742c699f
commit
458120dd40
3709 changed files with 1244309 additions and 1 deletions
74
sys/lib/python/mercurial/pure/base85.py
Normal file
74
sys/lib/python/mercurial/pure/base85.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
# base85.py: pure python base85 codec
|
||||
#
|
||||
# Copyright (C) 2009 Brendan Cully <brendan@kublai.com>
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
import struct
|
||||
|
||||
# base85 alphabet in digit-value order (RFC 1924 / git-binary-patch charset)
_b85chars = ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
# every two-character combination, so encoding emits two digits per lookup
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
# lazily-built reverse table: character -> digit value (see _mkb85dec)
_b85dec = {}


def _mkb85dec():
    """Populate the _b85dec decode table from the encode alphabet."""
    for i, c in enumerate(_b85chars):
        _b85dec[c] = i


def b85encode(text, pad=False):
    """Encode a byte string in base85 format; return bytes.

    If pad is true the input is NUL-padded to a multiple of 4 bytes and
    the full encoding (5 output chars per 4 input bytes) is returned;
    otherwise the output is trimmed to the minimum length needed to
    round-trip through b85decode.

    NOTE: ported from Python-2-only code that conflated str and bytes;
    struct.unpack requires a byte string and the NUL padding must be
    bytes on Python 3.
    """
    l = len(text)
    r = l % 4
    if r:
        # base85 encodes 32-bit words: NUL-pad the final partial word
        text += b'\0' * (4 - r)
    longs = len(text) >> 2
    words = struct.unpack('>%dL' % longs, text)

    # 52200625 == 85**4 and 7225 == 85**2: peel off the top base-85
    # digit, then two digit pairs via the precomputed pair table
    out = ''.join(_b85chars[(word // 52200625) % 85] +
                  _b85chars2[(word // 7225) % 7225] +
                  _b85chars2[word % 7225]
                  for word in words)

    if pad:
        return out.encode('ascii')

    # Trim padding: a partial input word of n bytes needs n + 1 chars
    olen = l % 4
    if olen:
        olen += 1
    olen += l // 4 * 5
    return out[:olen].encode('ascii')


def b85decode(text):
    """Decode base85-encoded text (bytes or str); return bytes.

    Raises TypeError for characters outside the alphabet and
    OverflowError if a 5-character group exceeds 32 bits.
    """
    if not _b85dec:
        _mkb85dec()
    if isinstance(text, bytes):
        # the decode table is keyed by characters
        text = text.decode('ascii')

    l = len(text)
    out = []
    for i in range(0, len(text), 5):
        chunk = text[i:i + 5]
        acc = 0
        for j, c in enumerate(chunk):
            try:
                acc = acc * 85 + _b85dec[c]
            except KeyError:
                raise TypeError('Bad base85 character at byte %d' % (i + j))
        if acc > 4294967295:
            raise OverflowError('Base85 overflow in hunk starting at byte %d' % i)
        out.append(acc)

    # Pad final chunk if necessary: complete it with the highest digit
    # values so the trailing bytes (dropped below) round up correctly
    cl = l % 5
    if cl:
        acc *= 85 ** (5 - cl)
        if cl > 1:
            acc += 0xffffff >> (cl - 2) * 8
        out[-1] = acc

    out = struct.pack('>%dL' % (len(out)), *out)
    if cl:
        # drop the bytes that only existed because of the padding above
        out = out[:-(5 - cl)]

    return out
|
76
sys/lib/python/mercurial/pure/bdiff.py
Normal file
76
sys/lib/python/mercurial/pure/bdiff.py
Normal file
|
@ -0,0 +1,76 @@
|
|||
# bdiff.py - Python implementation of bdiff.c
|
||||
#
|
||||
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
import struct, difflib
|
||||
|
||||
def splitnewlines(text):
    '''like str.splitlines, but only split on newlines.'''
    # split() drops the separators, so glue a newline back onto each piece
    lines = [piece + '\n' for piece in text.split('\n')]
    # the final piece never ended in a newline: either drop it entirely
    # (input ended with '\n', leaving an empty trailing piece) or strip
    # the newline we wrongly appended
    if lines[-1] == '\n':
        lines.pop()
    else:
        lines[-1] = lines[-1][:-1]
    return lines
|
||||
|
||||
def _normalizeblocks(a, b, blocks):
|
||||
prev = None
|
||||
for curr in blocks:
|
||||
if prev is None:
|
||||
prev = curr
|
||||
continue
|
||||
shift = 0
|
||||
|
||||
a1, b1, l1 = prev
|
||||
a1end = a1 + l1
|
||||
b1end = b1 + l1
|
||||
|
||||
a2, b2, l2 = curr
|
||||
a2end = a2 + l2
|
||||
b2end = b2 + l2
|
||||
if a1end == a2:
|
||||
while a1end+shift < a2end and a[a1end+shift] == b[b1end+shift]:
|
||||
shift += 1
|
||||
elif b1end == b2:
|
||||
while b1end+shift < b2end and a[a1end+shift] == b[b1end+shift]:
|
||||
shift += 1
|
||||
yield a1, b1, l1+shift
|
||||
prev = a2+shift, b2+shift, l2-shift
|
||||
yield prev
|
||||
|
||||
def bdiff(a, b):
    """Return a binary diff turning text a into text b.

    The result is a concatenation of fragments, each a 12-byte
    big-endian header (start byte offset in a, end byte offset in a,
    replacement length) followed by the replacement text taken from b.
    An empty result means the texts are equal.
    """
    a = str(a).splitlines(True)
    b = str(b).splitlines(True)

    if not a:
        # degenerate case: a single fragment replacing nothing (0, 0)
        # with all of b; empty s falls through and returns '' itself
        s = "".join(b)
        return s and (struct.pack(">lll", 0, 0, len(s)) + s)

    bin = []
    # p[i] is the byte offset of line i in a (prefix sums of line lengths)
    p = [0]
    for i in a: p.append(p[-1] + len(i))

    d = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
    d = _normalizeblocks(a, b, d)
    # la/lb: first line of a/b not yet covered by an emitted fragment
    la = 0
    lb = 0
    for am, bm, size in d:
        # lines of b that replace the gap a[la:am]
        s = "".join(b[lb:bm])
        if am > la or s:
            bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
        la = am + size
        lb = bm + size

    return "".join(bin)
|
||||
|
||||
def blocks(a, b):
    """Return the matching line ranges between texts a and b as a list
    of (astart, aend, bstart, bend) tuples."""
    alines = splitnewlines(a)
    blines = splitnewlines(b)
    matches = difflib.SequenceMatcher(None, alines, blines).get_matching_blocks()
    matches = _normalizeblocks(alines, blines, matches)
    return [(i, i + n, j, j + n) for (i, j, n) in matches]
|
||||
|
56
sys/lib/python/mercurial/pure/diffhelpers.py
Normal file
56
sys/lib/python/mercurial/pure/diffhelpers.py
Normal file
|
@ -0,0 +1,56 @@
|
|||
# diffhelpers.py - pure Python implementation of diffhelpers.c
|
||||
#
|
||||
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
def addlines(fp, hunk, lena, lenb, a, b):
    """Read the body of a hunk from file object fp.

    lena/lenb are the old/new line counts announced by the @@ header.
    Lines are appended to hunk (raw, control char included), to a (old
    file lines, control char kept) and to b (new file lines, control
    char stripped) until both sides have their announced counts.
    Returns 0.

    NOTE(review): assumes fp actually holds the announced number of
    lines; an early EOF would make s[0] raise IndexError — confirm
    callers guarantee this.
    """
    while True:
        # how many lines each side still needs
        todoa = lena - len(a)
        todob = lenb - len(b)
        num = max(todoa, todob)
        if num == 0:
            break
        for i in xrange(num):
            s = fp.readline()
            c = s[0]
            if s == "\\ No newline at end of file\n":
                # marker line: retroactively strip the newline that was
                # appended to the previous hunk line
                fix_newline(hunk, a, b)
                continue
            if c == "\n":
                # Some patches may be missing the control char
                # on empty lines. Supply a leading space.
                s = " \n"
            hunk.append(s)
            if c == "+":
                b.append(s[1:])
            elif c == "-":
                a.append(s)
            else:
                # context line: present in both old and new file
                b.append(s[1:])
                a.append(s)
    return 0
|
||||
|
||||
def fix_newline(hunk, a, b):
    """Strip the newline wrongly appended to the final hunk line.

    Called when a "\\ No newline at end of file" marker follows the
    hunk.  Updates hunk, and a (old lines, control char kept) and/or
    b (new lines, control char stripped) in place according to the
    line's control character.  Returns 0.
    """
    last = hunk[-1]
    tag = last[0]
    stripped = last[:-1]

    if tag in (" ", "+"):
        # line exists in the new file: b stores it without the tag
        b[-1] = last[1:-1]
    if tag in (" ", "-"):
        # line exists in the old file: a stores it tag included
        a[-1] = stripped
    hunk[-1] = stripped
    return 0
|
||||
|
||||
|
||||
def testhunk(a, b, bstart):
|
||||
alen = len(a)
|
||||
blen = len(b)
|
||||
if alen > blen - bstart:
|
||||
return -1
|
||||
for i in xrange(alen):
|
||||
if a[i][1:] != b[i + bstart]:
|
||||
return -1
|
||||
return 0
|
116
sys/lib/python/mercurial/pure/mpatch.py
Normal file
116
sys/lib/python/mercurial/pure/mpatch.py
Normal file
|
@ -0,0 +1,116 @@
|
|||
# mpatch.py - Python implementation of mpatch.c
|
||||
#
|
||||
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
import struct
|
||||
try:
|
||||
from cStringIO import StringIO
|
||||
except ImportError:
|
||||
from StringIO import StringIO
|
||||
|
||||
# This attempts to apply a series of patches in time proportional to
|
||||
# the total size of the patches, rather than patches * len(text). This
|
||||
# means rather than shuffling strings around, we shuffle around
|
||||
# pointers to fragments with fragment lists.
|
||||
#
|
||||
# When the fragment lists get too long, we collapse them. To do this
|
||||
# efficiently, we do all our operations inside a buffer created by
|
||||
# mmap and simply use memmove. This avoids creating a bunch of large
|
||||
# temporary string buffers.
|
||||
|
||||
def patches(a, bins):
    """Apply the list of binary patches bins to text a; return the
    patched text.

    Fragments of the result are tracked as (length, offset) pairs into
    one StringIO buffer laid out as [working text 1 | working text 2 |
    all patches], so the cost is proportional to total patch size
    rather than patches * len(text).
    """
    if not bins: return a

    plens = [len(x) for x in bins]
    pl = sum(plens)
    bl = len(a) + pl
    tl = bl + bl + pl # enough for the patches and two working texts
    b1, b2 = 0, bl    # offsets of the two working-text regions

    if not tl: return a

    m = StringIO()
    def move(dest, src, count):
        """move count bytes from src to dest inside the buffer

        The file pointer is left at the end of dest.
        """
        m.seek(src)
        buf = m.read(count)
        m.seek(dest)
        m.write(buf)

    # load our original text
    m.write(a)
    frags = [(len(a), b1)]

    # copy all the patches into our segment so we can memmove from them
    pos = b2 + bl
    m.seek(pos)
    for p in bins: m.write(p)

    def pull(dst, src, l): # pull l bytes from src
        # moves whole fragments from src to dst, splitting the last
        # one if it straddles the l-byte boundary
        while l:
            f = src.pop(0)
            if f[0] > l: # do we need to split?
                src.insert(0, (f[0] - l, f[1] + l))
                dst.append((l, f[1]))
                return
            dst.append(f)
            l -= f[0]

    def collect(buf, list):
        # materialize a fragment list contiguously at offset buf;
        # returns the resulting single fragment (length, start)
        start = buf
        for l, p in list:
            move(buf, p, l)
            buf += l
        return (buf - start, start)

    for plen in plens:
        # if our list gets too long, execute it
        if len(frags) > 128:
            # alternate between the two working-text regions
            b2, b1 = b1, b2
            frags = [collect(b1, frags)]

        new = []
        end = pos + plen
        last = 0
        while pos < end:
            m.seek(pos)
            # fragment header: replace bytes [p1, p2) with l new bytes
            p1, p2, l = struct.unpack(">lll", m.read(12))
            pull(new, frags, p1 - last) # what didn't change
            pull([], frags, p2 - p1) # what got deleted
            new.append((l, pos + 12)) # what got added
            pos += l + 12
            last = p2
        frags = new + frags # what was left at the end

    t = collect(b2, frags)

    m.seek(t[1])
    return m.read(t[0])
|
||||
|
||||
def patchedsize(orig, delta):
    """Return the size of a text of length orig after applying binary
    patch delta, without actually applying it.

    delta is a sequence of fragments, each a 12-byte big-endian header
    (start, end, length) followed by length bytes of replacement text.
    Raises Exception if the fragment headers do not tile delta exactly.
    """
    outlen = last = hdr = 0
    delta_end = len(delta)
    payload = 12  # offset just past the current fragment's header

    while payload <= delta_end:
        start, end, length = struct.unpack(">lll", delta[hdr:hdr + 12])
        if start > end:
            break  # malformed fragment; caught by the check below
        # step over this fragment's replacement text to the next header
        hdr = payload + length
        payload = hdr + 12
        # unchanged bytes before the fragment, plus its replacement
        outlen += (start - last) + length
        last = end

    if hdr != delta_end:
        raise Exception("patch cannot be decoded")

    # unchanged tail after the final fragment
    return outlen + (orig - last)
|
52
sys/lib/python/mercurial/pure/osutil.py
Normal file
52
sys/lib/python/mercurial/pure/osutil.py
Normal file
|
@ -0,0 +1,52 @@
|
|||
# osutil.py - pure Python version of osutil.c
|
||||
#
|
||||
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
import os
|
||||
import stat as _stat
|
||||
|
||||
posixfile = open
|
||||
|
||||
def _mode_to_kind(mode):
|
||||
if _stat.S_ISREG(mode): return _stat.S_IFREG
|
||||
if _stat.S_ISDIR(mode): return _stat.S_IFDIR
|
||||
if _stat.S_ISLNK(mode): return _stat.S_IFLNK
|
||||
if _stat.S_ISBLK(mode): return _stat.S_IFBLK
|
||||
if _stat.S_ISCHR(mode): return _stat.S_IFCHR
|
||||
if _stat.S_ISFIFO(mode): return _stat.S_IFIFO
|
||||
if _stat.S_ISSOCK(mode): return _stat.S_IFSOCK
|
||||
return mode
|
||||
|
||||
def listdir(path, stat=False, skip=None):
    '''listdir(path, stat=False) -> list_of_tuples

    Return a sorted list describing the entries of directory path.

    Each element is a 3-tuple (name, type, stat object) when stat is
    True, and a 2-tuple (name, type) otherwise, where type is one of
    the stat.S_IF* constants produced by _mode_to_kind.

    If skip names a subdirectory present in path, an empty list is
    returned instead.
    '''
    # build "path/" once so each entry only needs a concatenation
    prefix = path if path.endswith(os.sep) else path + os.sep
    entries = []
    for name in sorted(os.listdir(path)):
        st = os.lstat(prefix + name)
        if name == skip and _stat.S_ISDIR(st.st_mode):
            return []
        kind = _mode_to_kind(st.st_mode)
        entries.append((name, kind, st) if stat else (name, kind))
    return entries
|
||||
|
90
sys/lib/python/mercurial/pure/parsers.py
Normal file
90
sys/lib/python/mercurial/pure/parsers.py
Normal file
|
@ -0,0 +1,90 @@
|
|||
# parsers.py - Python implementation of parsers.c
|
||||
#
|
||||
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
|
||||
#
|
||||
# This software may be used and distributed according to the terms of the
|
||||
# GNU General Public License version 2, incorporated herein by reference.
|
||||
|
||||
from mercurial.node import bin, nullid, nullrev
|
||||
from mercurial import util
|
||||
import struct, zlib
|
||||
|
||||
_pack = struct.pack
|
||||
_unpack = struct.unpack
|
||||
_compress = zlib.compress
|
||||
_decompress = zlib.decompress
|
||||
_sha = util.sha1
|
||||
|
||||
def parse_manifest(mfdict, fdict, lines):
    """Parse manifest text into mfdict (file -> binary node) and fdict
    (file -> flags string), mutating both in place.

    Each manifest line is the filename, a NUL, 40 hex node characters,
    and optional trailing flag characters.
    """
    for line in lines.splitlines():
        f, n = line.split('\0')
        # anything past the 40-hex-digit node is the flags string
        node, flags = n[:40], n[40:]
        if flags:
            fdict[f] = flags
        mfdict[f] = bin(node)
|
||||
|
||||
def parse_index(data, inline):
    """Parse RevlogNG index data.

    Returns (index, nodemap, cache): index is the list of entry tuples
    (with the magic null revision appended), nodemap maps 20-byte node
    ids to revision numbers, and cache is (0, data) for inline revlogs
    so callers can reuse the already-read chunk, else None.
    """
    def gettype(q):
        # the low 16 bits of the first field hold the entry type/flags
        return int(q & 0xFFFF)

    def offset_type(offset, type):
        # pack a data offset and type back into the single 64-bit field
        return long(long(offset) << 16 | type)

    # 64-bit offset/flags, 6 32-bit ints (comp/uncomp length, base,
    # link, two parents), 20-byte node, 12 bytes of padding
    indexformatng = ">Qiiiiii20s12x"

    s = struct.calcsize(indexformatng)
    index = []
    cache = None
    nodemap = {nullid: nullrev}
    n = off = 0
    # if we're not using lazymap, always read the whole index
    l = len(data) - s
    append = index.append
    if inline:
        # inline revlogs interleave each entry with its data chunk:
        # advance by entry size plus the compressed length (e[1])
        cache = (0, data)
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            nodemap[e[7]] = n
            append(e)
            n += 1
            if e[1] < 0:
                # negative length marks a truncated/terminal entry
                break
            off += e[1] + s
    else:
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            nodemap[e[7]] = n
            append(e)
            n += 1
            off += s

    # the first entry's offset field actually stores the revlog version
    # header on disk; reset it to a true zero offset, keeping the type
    e = list(index[0])
    type = gettype(e[0])
    e[0] = offset_type(0, type)
    index[0] = tuple(e)

    # add the magic null revision at -1
    index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    return index, nodemap, cache
|
||||
|
||||
def parse_dirstate(dmap, copymap, st):
    """Parse dirstate file contents st into dmap and copymap.

    dmap maps filename -> (state char, mode, size, mtime); copymap maps
    filename -> copy source.  Returns the two 20-byte parent node ids
    stored at the start of the file.
    """
    parents = [st[:20], st[20: 40]]
    # deref fields so they will be local in loop
    format = ">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        # entry: state char, mode, size, mtime, filename length (e[4])
        e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if '\0' in f:
            # an embedded NUL separates the name from its copy source
            f, c = f.split('\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents
|
Loading…
Add table
Add a link
Reference in a new issue