add hg and python

cinap_lenrek 2011-05-03 11:25:13 +00:00
parent 3a742c699f
commit 458120dd40
3709 changed files with 1244309 additions and 1 deletion


@@ -0,0 +1,109 @@
# __init__.py - inotify-based status acceleration for Linux
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
'''accelerate status report using Linux's inotify service'''
# todo: socket permissions
from mercurial.i18n import _
from mercurial import cmdutil, util
import server
from weakref import proxy
from client import client, QueryFailed
def serve(ui, repo, **opts):
'''start an inotify server for this repository'''
timeout = opts.get('idle_timeout')
if timeout:
timeout = float(timeout) * 1e3
class service(object):
def init(self):
try:
self.master = server.master(ui, repo.dirstate,
repo.root, timeout)
except server.AlreadyStartedException, inst:
raise util.Abort(str(inst))
def run(self):
try:
self.master.run()
finally:
self.master.shutdown()
service = service()
logfile = ui.config('inotify', 'log')
cmdutil.service(opts, initfn=service.init, runfn=service.run,
logfile=logfile)
def debuginotify(ui, repo, **opts):
'''debugging information for inotify extension
Prints the list of directories being watched by the inotify server.
'''
cli = client(ui, repo)
response = cli.debugquery()
ui.write(_('directories being watched:\n'))
for path in response:
ui.write((' %s/\n') % path)
def reposetup(ui, repo):
if not hasattr(repo, 'dirstate'):
return
class inotifydirstate(repo.dirstate.__class__):
# We'll set this to False after an unsuccessful attempt so that
# subsequent calls to status() within the same instance don't keep
# trying to start an inotify server that won't start.
_inotifyon = True
def status(self, match, ignored, clean, unknown=True):
files = match.files()
if '.' in files:
files = []
if self._inotifyon and not ignored:
cli = client(ui, repo)
try:
result = cli.statusquery(files, match, False,
clean, unknown)
except QueryFailed, instr:
ui.debug(str(instr))
# don't retry within the same hg instance
inotifydirstate._inotifyon = False
pass
else:
if ui.config('inotify', 'debug'):
r2 = super(inotifydirstate, self).status(
match, False, clean, unknown)
for c,a,b in zip('LMARDUIC', result, r2):
for f in a:
if f not in b:
ui.warn('*** inotify: %s +%s\n' % (c, f))
for f in b:
if f not in a:
ui.warn('*** inotify: %s -%s\n' % (c, f))
result = r2
return result
return super(inotifydirstate, self).status(
match, ignored, clean, unknown)
repo.dirstate.__class__ = inotifydirstate
cmdtable = {
'debuginotify':
(debuginotify, [], ('hg debuginotify')),
'^inserve':
(serve,
[('d', 'daemon', None, _('run server in background')),
('', 'daemon-pipefds', '', _('used internally by daemon mode')),
('t', 'idle-timeout', '', _('minutes to sit idle before exiting')),
('', 'pid-file', '', _('name of file to write process ID to'))],
_('hg inserve [OPTION]...')),
}
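
As registered above, the extension exposes hg inserve to start the daemon and hg debuginotify to inspect it, while reposetup silently reroutes dirstate.status() through the server. A minimal hgrc sketch of how this would typically be wired up; the extension name "inotify" and the log path are assumptions, only the [inotify] keys themselves appear in the code above:

# hypothetical ~/.hgrc or .hg/hgrc snippet -- extension name is an assumption
[extensions]
inotify =

[inotify]
# read by 'hg inserve' via ui.config('inotify', 'log') above
log = /tmp/hg-inotify.log
# any non-empty value makes __init__.py cross-check server answers
# against a regular status walk
debug = 1

With that in place, hg inserve -d --pid-file .hg/inserve.pid forks the server using the options from cmdtable, and hg debuginotify prints the directories it is watching.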


@@ -0,0 +1,160 @@
# client.py - inotify status client
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
from mercurial.i18n import _
import common, server
import errno, os, socket, struct
class QueryFailed(Exception): pass
def start_server(function):
"""
Decorator.
Try to call the decorated function; if that fails, try to (re)start the
inotify server. Raise QueryFailed if something goes wrong.
"""
def decorated_function(self, *args):
result = None
try:
return function(self, *args)
except (OSError, socket.error), err:
autostart = self.ui.configbool('inotify', 'autostart', True)
if err[0] == errno.ECONNREFUSED:
self.ui.warn(_('(found dead inotify server socket; '
'removing it)\n'))
os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
self.ui.debug(_('(starting inotify server)\n'))
try:
try:
server.start(self.ui, self.dirstate, self.root)
except server.AlreadyStartedException, inst:
# another process may have started its own
# inotify server while this one was starting.
self.ui.debug(str(inst))
except Exception, inst:
self.ui.warn(_('could not start inotify server: '
'%s\n') % inst)
else:
try:
return function(self, *args)
except socket.error, err:
self.ui.warn(_('could not talk to new inotify '
'server: %s\n') % err[-1])
elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
# silently ignore normal errors if autostart is False
self.ui.debug(_('(inotify server not running)\n'))
else:
self.ui.warn(_('failed to contact inotify server: %s\n')
% err[-1])
self.ui.traceback()
raise QueryFailed('inotify query failed')
return decorated_function
class client(object):
def __init__(self, ui, repo):
self.ui = ui
self.dirstate = repo.dirstate
self.root = repo.root
self.sock = socket.socket(socket.AF_UNIX)
def _connect(self):
sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
try:
self.sock.connect(sockpath)
except socket.error, err:
if err[0] == "AF_UNIX path too long":
sockpath = os.readlink(sockpath)
self.sock.connect(sockpath)
else:
raise
def _send(self, type, data):
"""Sends protocol version number, and the data"""
self.sock.sendall(chr(common.version) + type + data)
self.sock.shutdown(socket.SHUT_WR)
def _receive(self, type):
"""
Read data, check the version number, and extract the headers.
Return a tuple (data descriptor, header).
Raise QueryFailed on error.
"""
cs = common.recvcs(self.sock)
try:
version = ord(cs.read(1))
except TypeError:
# empty answer, assume the server crashed
self.ui.warn(_('received empty answer from inotify server'))
raise QueryFailed('server crashed')
if version != common.version:
self.ui.warn(_('(inotify: received response from incompatible '
'server version %d)\n') % version)
raise QueryFailed('incompatible server version')
readtype = cs.read(4)
if readtype != type:
self.ui.warn(_('(inotify: received \'%s\' response when expecting'
' \'%s\')\n') % (readtype, type))
raise QueryFailed('wrong response type')
hdrfmt = common.resphdrfmts[type]
hdrsize = common.resphdrsizes[type]
try:
resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
except struct.error:
raise QueryFailed('unable to retrieve query response headers')
return cs, resphdr
def query(self, type, req):
self._connect()
self._send(type, req)
return self._receive(type)
@start_server
def statusquery(self, names, match, ignored, clean, unknown=True):
def genquery():
for n in names:
yield n
states = 'almrx!'
if ignored:
raise ValueError('this is insanity')
if clean: states += 'c'
if unknown: states += '?'
yield states
req = '\0'.join(genquery())
cs, resphdr = self.query('STAT', req)
def readnames(nbytes):
if nbytes:
names = cs.read(nbytes)
if names:
return filter(match, names.split('\0'))
return []
return map(readnames, resphdr)
@start_server
def debugquery(self):
cs, resphdr = self.query('DBUG', '')
nbytes = resphdr[0]
names = cs.read(nbytes)
return names.split('\0')
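
statusquery hands back eight \0-separated name lists, one per status type, in the lmar!?ic order described in common.py (matching the lookup, modified, added, removed, deleted, unknown, ignored, clean tuple that dirstate.status normally returns). A small sketch, assuming ui and repo objects obtained from Mercurial elsewhere, of labelling that result; the function name and key names are illustrative only:

from client import client, QueryFailed

def labelled_status(ui, repo, names=None):
    # illustrative helper, not part of the commit
    cli = client(ui, repo)
    try:
        # match-all predicate; don't ask for ignored files, do ask for clean ones
        result = cli.statusquery(names or [], lambda f: True, False, True, True)
    except QueryFailed, err:
        ui.warn('inotify query failed: %s\n' % err)
        return None
    keys = ('lookup', 'modified', 'added', 'removed',
            'deleted', 'unknown', 'ignored', 'clean')
    return dict(zip(keys, result))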


@@ -0,0 +1,51 @@
# server.py - inotify common protocol code
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
import cStringIO, socket, struct
"""
Protocol between inotify clients and server:
Client sending query:
1) send protocol version number
2) send query type (string, 4 letters long)
3) send query parameters:
- For STAT, N+1 \0-separated strings:
1) N different names that need checking
2) 1 string containing all the status types to match
- No parameter needed for DBUG
Server sending query answer:
1) send protocol version number
2) send query type
3) send struct.pack'ed headers describing the length of the content:
e.g. for STAT, receive 8 integers describing the length of the
8 \0-separated string lists (one list for each lmar!?ic status type)
"""
version = 2
resphdrfmts = {
'STAT': '>llllllll', # status requests
'DBUG': '>l' # debugging queries
}
resphdrsizes = dict((k, struct.calcsize(v))
for k, v in resphdrfmts.iteritems())
def recvcs(sock):
cs = cStringIO.StringIO()
s = True
try:
while s:
s = sock.recv(65536)
cs.write(s)
finally:
sock.shutdown(socket.SHUT_RD)
cs.seek(0)
return cs
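
The protocol above is small enough to exercise by hand. A sketch of sending a DBUG query over the repository's socket and unpacking its single length header; it assumes a server is already listening on .hg/inotify.sock under repo_root:

import os, socket, struct
import common

def raw_dbug_query(repo_root):
    # sketch only: speaks the request/response framing described above
    sock = socket.socket(socket.AF_UNIX)
    sock.connect(os.path.join(repo_root, '.hg', 'inotify.sock'))
    # 1) version byte  2) 4-letter query type  3) no parameters for DBUG
    sock.sendall(chr(common.version) + 'DBUG')
    sock.shutdown(socket.SHUT_WR)
    cs = common.recvcs(sock)
    assert ord(cs.read(1)) == common.version
    assert cs.read(4) == 'DBUG'
    (nbytes,) = struct.unpack(common.resphdrfmts['DBUG'],
                              cs.read(common.resphdrsizes['DBUG']))
    return cs.read(nbytes).split('\0')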


@@ -0,0 +1,41 @@
# __init__.py - low-level interfaces to the Linux inotify subsystem
# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License, incorporated herein by reference.
'''Low-level interface to the Linux inotify subsystem.
The inotify subsystem provides an efficient mechanism for file status
monitoring and change notification.
This package provides the low-level inotify system call interface and
associated constants and helper functions.
For a higher-level interface that remains highly efficient, use the
inotify.watcher package.'''
__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
from _inotify import *
procfs_path = '/proc/sys/fs/inotify'
def _read_procfs_value(name):
def read_value():
try:
return int(open(procfs_path + '/' + name).read())
except OSError:
return None
read_value.__doc__ = '''Return the value of the %s setting from /proc.
If inotify is not enabled on this system, return None.''' % name
return read_value
max_queued_events = _read_procfs_value('max_queued_events')
max_user_instances = _read_procfs_value('max_user_instances')
max_user_watches = _read_procfs_value('max_user_watches')
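
Note that the three names above are bound to zero-argument functions, not integers. A short usage sketch, importing the package the same way server.py does below (the printed wording is illustrative):

import linux as inotify   # the package defined above

watches = inotify.max_user_watches()
if watches is None:
    print('inotify does not appear to be enabled on this system')
else:
    print('per-user inotify watch limit: %d' % watches)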


@@ -0,0 +1,608 @@
/*
* _inotify.c - Python extension interfacing to the Linux inotify subsystem
*
* Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of version 2.1 of the GNU Lesser General
* Public License, incorporated herein by reference.
*/
#include <Python.h>
#include <alloca.h>
#include <sys/inotify.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
static PyObject *init(PyObject *self, PyObject *args)
{
PyObject *ret = NULL;
int fd = -1;
if (!PyArg_ParseTuple(args, ":init"))
goto bail;
Py_BEGIN_ALLOW_THREADS
fd = inotify_init();
Py_END_ALLOW_THREADS
if (fd == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto bail;
}
ret = PyInt_FromLong(fd);
if (ret == NULL)
goto bail;
goto done;
bail:
if (fd != -1)
close(fd);
Py_CLEAR(ret);
done:
return ret;
}
PyDoc_STRVAR(
init_doc,
"init() -> fd\n"
"\n"
"Initialise an inotify instance.\n"
"Return a file descriptor associated with a new inotify event queue.");
static PyObject *add_watch(PyObject *self, PyObject *args)
{
PyObject *ret = NULL;
uint32_t mask;
int wd = -1;
char *path;
int fd;
if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
goto bail;
Py_BEGIN_ALLOW_THREADS
wd = inotify_add_watch(fd, path, mask);
Py_END_ALLOW_THREADS
if (wd == -1) {
PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
goto bail;
}
ret = PyInt_FromLong(wd);
if (ret == NULL)
goto bail;
goto done;
bail:
if (wd != -1)
inotify_rm_watch(fd, wd);
Py_CLEAR(ret);
done:
return ret;
}
PyDoc_STRVAR(
add_watch_doc,
"add_watch(fd, path, mask) -> wd\n"
"\n"
"Add a watch to an inotify instance, or modify an existing watch.\n"
"\n"
" fd: file descriptor returned by init()\n"
" path: path to watch\n"
" mask: mask of events to watch for\n"
"\n"
"Return a unique numeric watch descriptor for the inotify instance\n"
"mapped by the file descriptor.");
static PyObject *remove_watch(PyObject *self, PyObject *args)
{
PyObject *ret = NULL;
uint32_t wd;
int fd;
int r;
if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
goto bail;
Py_BEGIN_ALLOW_THREADS
r = inotify_rm_watch(fd, wd);
Py_END_ALLOW_THREADS
if (r == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto bail;
}
ret = Py_None;
Py_INCREF(ret);
goto done;
bail:
Py_CLEAR(ret);
done:
return ret;
}
PyDoc_STRVAR(
remove_watch_doc,
"remove_watch(fd, wd)\n"
"\n"
" fd: file descriptor returned by init()\n"
" wd: watch descriptor returned by add_watch()\n"
"\n"
"Remove a watch associated with the watch descriptor wd from the\n"
"inotify instance associated with the file descriptor fd.\n"
"\n"
"Removing a watch causes an IN_IGNORED event to be generated for this\n"
"watch descriptor.");
#define bit_name(x) {x, #x}
static struct {
int bit;
const char *name;
PyObject *pyname;
} bit_names[] = {
bit_name(IN_ACCESS),
bit_name(IN_MODIFY),
bit_name(IN_ATTRIB),
bit_name(IN_CLOSE_WRITE),
bit_name(IN_CLOSE_NOWRITE),
bit_name(IN_OPEN),
bit_name(IN_MOVED_FROM),
bit_name(IN_MOVED_TO),
bit_name(IN_CREATE),
bit_name(IN_DELETE),
bit_name(IN_DELETE_SELF),
bit_name(IN_MOVE_SELF),
bit_name(IN_UNMOUNT),
bit_name(IN_Q_OVERFLOW),
bit_name(IN_IGNORED),
bit_name(IN_ONLYDIR),
bit_name(IN_DONT_FOLLOW),
bit_name(IN_MASK_ADD),
bit_name(IN_ISDIR),
bit_name(IN_ONESHOT),
{0}
};
static PyObject *decode_mask(int mask)
{
PyObject *ret = PyList_New(0);
int i;
if (ret == NULL)
goto bail;
for (i = 0; bit_names[i].bit; i++) {
if (mask & bit_names[i].bit) {
if (bit_names[i].pyname == NULL) {
bit_names[i].pyname = PyString_FromString(bit_names[i].name);
if (bit_names[i].pyname == NULL)
goto bail;
}
Py_INCREF(bit_names[i].pyname);
if (PyList_Append(ret, bit_names[i].pyname) == -1)
goto bail;
}
}
goto done;
bail:
Py_CLEAR(ret);
done:
return ret;
}
static PyObject *pydecode_mask(PyObject *self, PyObject *args)
{
int mask;
if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
return NULL;
return decode_mask(mask);
}
PyDoc_STRVAR(
decode_mask_doc,
"decode_mask(mask) -> list_of_strings\n"
"\n"
"Decode an inotify mask value into a list of strings that give the\n"
"name of each bit set in the mask.");
static char doc[] = "Low-level inotify interface wrappers.";
static void define_const(PyObject *dict, const char *name, uint32_t val)
{
PyObject *pyval = PyInt_FromLong(val);
PyObject *pyname = PyString_FromString(name);
if (!pyname || !pyval)
goto bail;
PyDict_SetItem(dict, pyname, pyval);
bail:
Py_XDECREF(pyname);
Py_XDECREF(pyval);
}
static void define_consts(PyObject *dict)
{
define_const(dict, "IN_ACCESS", IN_ACCESS);
define_const(dict, "IN_MODIFY", IN_MODIFY);
define_const(dict, "IN_ATTRIB", IN_ATTRIB);
define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
define_const(dict, "IN_OPEN", IN_OPEN);
define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
define_const(dict, "IN_CLOSE", IN_CLOSE);
define_const(dict, "IN_MOVE", IN_MOVE);
define_const(dict, "IN_CREATE", IN_CREATE);
define_const(dict, "IN_DELETE", IN_DELETE);
define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
define_const(dict, "IN_IGNORED", IN_IGNORED);
define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
define_const(dict, "IN_ISDIR", IN_ISDIR);
define_const(dict, "IN_ONESHOT", IN_ONESHOT);
define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
}
struct event {
PyObject_HEAD
PyObject *wd;
PyObject *mask;
PyObject *cookie;
PyObject *name;
};
static PyObject *event_wd(PyObject *self, void *x)
{
struct event *evt = (struct event *) self;
Py_INCREF(evt->wd);
return evt->wd;
}
static PyObject *event_mask(PyObject *self, void *x)
{
struct event *evt = (struct event *) self;
Py_INCREF(evt->mask);
return evt->mask;
}
static PyObject *event_cookie(PyObject *self, void *x)
{
struct event *evt = (struct event *) self;
Py_INCREF(evt->cookie);
return evt->cookie;
}
static PyObject *event_name(PyObject *self, void *x)
{
struct event *evt = (struct event *) self;
Py_INCREF(evt->name);
return evt->name;
}
static struct PyGetSetDef event_getsets[] = {
{"wd", event_wd, NULL,
"watch descriptor"},
{"mask", event_mask, NULL,
"event mask"},
{"cookie", event_cookie, NULL,
"rename cookie, if rename-related event"},
{"name", event_name, NULL,
"file name"},
{NULL}
};
PyDoc_STRVAR(
event_doc,
"event: Structure describing an inotify event.");
static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
{
return (*t->tp_alloc)(t, 0);
}
static void event_dealloc(struct event *evt)
{
Py_XDECREF(evt->wd);
Py_XDECREF(evt->mask);
Py_XDECREF(evt->cookie);
Py_XDECREF(evt->name);
(*evt->ob_type->tp_free)(evt);
}
static PyObject *event_repr(struct event *evt)
{
int wd = PyInt_AsLong(evt->wd);
int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
PyObject *join = NULL;
char *maskstr;
join = PyString_FromString("|");
if (join == NULL)
goto bail;
pymasks = decode_mask(PyInt_AsLong(evt->mask));
if (pymasks == NULL)
goto bail;
pymask = _PyString_Join(join, pymasks);
if (pymask == NULL)
goto bail;
maskstr = PyString_AsString(pymask);
if (evt->name != Py_None) {
PyObject *pyname = PyString_Repr(evt->name, 1);
char *name = pyname ? PyString_AsString(pyname) : "???";
if (cookie == -1)
ret = PyString_FromFormat("event(wd=%d, mask=%s, name=%s)",
wd, maskstr, name);
else
ret = PyString_FromFormat("event(wd=%d, mask=%s, "
"cookie=0x%x, name=%s)",
wd, maskstr, cookie, name);
Py_XDECREF(pyname);
} else {
if (cookie == -1)
ret = PyString_FromFormat("event(wd=%d, mask=%s)",
wd, maskstr);
else {
ret = PyString_FromFormat("event(wd=%d, mask=%s, cookie=0x%x)",
wd, maskstr, cookie);
}
}
goto done;
bail:
Py_CLEAR(ret);
done:
Py_XDECREF(pymask);
Py_XDECREF(pymasks);
Py_XDECREF(join);
return ret;
}
static PyTypeObject event_type = {
PyObject_HEAD_INIT(NULL)
0, /*ob_size*/
"_inotify.event", /*tp_name*/
sizeof(struct event), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)event_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
(reprfunc)event_repr, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash */
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
event_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
event_getsets, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
event_new, /* tp_new */
};
PyObject *read_events(PyObject *self, PyObject *args)
{
PyObject *ctor_args = NULL;
PyObject *pybufsize = NULL;
PyObject *ret = NULL;
int bufsize = 65536;
char *buf = NULL;
int nread, pos;
int fd;
if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
goto bail;
if (pybufsize && pybufsize != Py_None)
bufsize = PyInt_AsLong(pybufsize);
ret = PyList_New(0);
if (ret == NULL)
goto bail;
if (bufsize <= 0) {
int r;
Py_BEGIN_ALLOW_THREADS
r = ioctl(fd, FIONREAD, &bufsize);
Py_END_ALLOW_THREADS
if (r == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto bail;
}
if (bufsize == 0)
goto done;
}
else {
static long name_max;
static long name_fd = -1;
long min;
if (name_fd != fd) {
name_fd = fd;
Py_BEGIN_ALLOW_THREADS
name_max = fpathconf(fd, _PC_NAME_MAX);
Py_END_ALLOW_THREADS
}
min = sizeof(struct inotify_event) + name_max + 1;
if (bufsize < min) {
PyErr_Format(PyExc_ValueError, "bufsize must be at least %d",
(int) min);
goto bail;
}
}
buf = alloca(bufsize);
Py_BEGIN_ALLOW_THREADS
nread = read(fd, buf, bufsize);
Py_END_ALLOW_THREADS
if (nread == -1) {
PyErr_SetFromErrno(PyExc_OSError);
goto bail;
}
ctor_args = PyTuple_New(0);
if (ctor_args == NULL)
goto bail;
pos = 0;
while (pos < nread) {
struct inotify_event *in = (struct inotify_event *) (buf + pos);
struct event *evt;
PyObject *obj;
obj = PyObject_CallObject((PyObject *) &event_type, ctor_args);
if (obj == NULL)
goto bail;
evt = (struct event *) obj;
evt->wd = PyInt_FromLong(in->wd);
evt->mask = PyInt_FromLong(in->mask);
if (in->mask & IN_MOVE)
evt->cookie = PyInt_FromLong(in->cookie);
else {
Py_INCREF(Py_None);
evt->cookie = Py_None;
}
if (in->len)
evt->name = PyString_FromString(in->name);
else {
Py_INCREF(Py_None);
evt->name = Py_None;
}
if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
goto mybail;
if (PyList_Append(ret, obj) == -1)
goto mybail;
pos += sizeof(struct inotify_event) + in->len;
continue;
mybail:
Py_CLEAR(evt->wd);
Py_CLEAR(evt->mask);
Py_CLEAR(evt->cookie);
Py_CLEAR(evt->name);
Py_DECREF(obj);
goto bail;
}
goto done;
bail:
Py_CLEAR(ret);
done:
Py_XDECREF(ctor_args);
return ret;
}
PyDoc_STRVAR(
read_doc,
"read(fd, bufsize[=65536]) -> list_of_events\n"
"\n"
"\nRead inotify events from a file descriptor.\n"
"\n"
" fd: file descriptor returned by init()\n"
" bufsize: size of buffer to read into, in bytes\n"
"\n"
"Return a list of event objects.\n"
"\n"
"If bufsize is > 0, block until events are available to be read.\n"
"Otherwise, immediately return all events that can be read without\n"
"blocking.");
static PyMethodDef methods[] = {
{"init", init, METH_VARARGS, init_doc},
{"add_watch", add_watch, METH_VARARGS, add_watch_doc},
{"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
{"read", read_events, METH_VARARGS, read_doc},
{"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
{NULL},
};
void init_inotify(void)
{
PyObject *mod, *dict;
if (PyType_Ready(&event_type) == -1)
return;
mod = Py_InitModule3("_inotify", methods, doc);
dict = PyModule_GetDict(mod);
if (dict)
define_consts(dict);
}
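
init, add_watch, read and decode_mask together form the whole public surface of this module. A minimal sketch of driving it directly, using only the entry points documented above (the watched path is a placeholder):

import os
import _inotify as inotify

fd = inotify.init()                                  # new inotify event queue
wd = inotify.add_watch(fd, '/tmp', inotify.IN_ALL_EVENTS)
try:
    # bufsize defaults to 65536 (> 0), so read() blocks until events arrive
    for evt in inotify.read(fd):
        print('%s %s' % (evt.name, '|'.join(inotify.decode_mask(evt.mask))))
finally:
    inotify.remove_watch(fd, wd)
    os.close(fd)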


@@ -0,0 +1,335 @@
# watcher.py - high-level interfaces to the Linux inotify subsystem
# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License, incorporated herein by reference.
'''High-level interfaces to the Linux inotify subsystem.
The inotify subsystem provides an efficient mechanism for file status
monitoring and change notification.
The watcher class hides the low-level details of the inotify
interface, and provides a Pythonic wrapper around it. It generates
events that provide somewhat more information than raw inotify makes
available.
The autowatcher class is more useful, as it automatically watches
newly-created directories on your behalf.'''
__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
import _inotify as inotify
import array
import errno
import fcntl
import os
import termios
class event(object):
'''Derived inotify event class.
The following fields are available:
mask: event mask, indicating what kind of event this is
cookie: rename cookie, if a rename-related event
path: path of the directory in which the event occurred
name: name of the directory entry to which the event occurred
(may be None if the event happened to a watched directory)
fullpath: complete path at which the event occurred
wd: watch descriptor that triggered this event'''
__slots__ = (
'cookie',
'fullpath',
'mask',
'name',
'path',
'raw',
'wd',
)
def __init__(self, raw, path):
self.path = path
self.raw = raw
if raw.name:
self.fullpath = path + '/' + raw.name
else:
self.fullpath = path
self.wd = raw.wd
self.mask = raw.mask
self.cookie = raw.cookie
self.name = raw.name
def __repr__(self):
r = repr(self.raw)
return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
_event_props = {
'access': 'File was accessed',
'modify': 'File was modified',
'attrib': 'Attribute of a directory entry was changed',
'close_write': 'File was closed after being written to',
'close_nowrite': 'File was closed without being written to',
'open': 'File was opened',
'moved_from': 'Directory entry was renamed from this name',
'moved_to': 'Directory entry was renamed to this name',
'create': 'Directory entry was created',
'delete': 'Directory entry was deleted',
'delete_self': 'The watched directory entry was deleted',
'move_self': 'The watched directory entry was renamed',
'unmount': 'Directory was unmounted, and can no longer be watched',
'q_overflow': 'Kernel dropped events due to queue overflow',
'ignored': 'Directory entry is no longer being watched',
'isdir': 'Event occurred on a directory',
}
for k, v in _event_props.iteritems():
mask = getattr(inotify, 'IN_' + k.upper())
# bind mask at definition time: at module scope getter would otherwise
# read the global 'mask', which ends the loop holding only the last value
def getter(self, mask=mask):
return self.mask & mask
getter.__name__ = k
getter.__doc__ = v
setattr(event, k, property(getter, doc=v))
del _event_props
class watcher(object):
'''Provide a Pythonic interface to the low-level inotify API.
Also adds derived information to each event that is not available
through the normal inotify API, such as directory name.'''
__slots__ = (
'fd',
'_paths',
'_wds',
)
def __init__(self):
'''Create a new inotify instance.'''
self.fd = inotify.init()
self._paths = {}
self._wds = {}
def fileno(self):
'''Return the file descriptor this watcher uses.
Useful for passing to select and poll.'''
return self.fd
def add(self, path, mask):
'''Add or modify a watch.
Return the watch descriptor added or modified.'''
path = os.path.normpath(path)
wd = inotify.add_watch(self.fd, path, mask)
self._paths[path] = wd, mask
self._wds[wd] = path, mask
return wd
def remove(self, wd):
'''Remove the given watch.'''
inotify.remove_watch(self.fd, wd)
self._remove(wd)
def _remove(self, wd):
path_mask = self._wds.pop(wd, None)
if path_mask is not None:
self._paths.pop(path_mask[0])
def path(self, path):
'''Return a (watch descriptor, event mask) pair for the given path.
If the path is not being watched, return None.'''
return self._paths.get(path)
def wd(self, wd):
'''Return a (path, event mask) pair for the given watch descriptor.
If the watch descriptor is not valid or not associated with
this watcher, return None.'''
return self._wds.get(wd)
def read(self, bufsize=None):
'''Read a list of queued inotify events.
If bufsize is zero, only return those events that can be read
immediately without blocking. Otherwise, block until events are
available.'''
events = []
for evt in inotify.read(self.fd, bufsize):
events.append(event(evt, self._wds[evt.wd][0]))
if evt.mask & inotify.IN_IGNORED:
self._remove(evt.wd)
elif evt.mask & inotify.IN_UNMOUNT:
self.close()
return events
def close(self):
'''Shut down this watcher.
All subsequent method calls are likely to raise exceptions.'''
os.close(self.fd)
self.fd = None
self._paths = None
self._wds = None
def __len__(self):
'''Return the number of active watches.'''
return len(self._paths)
def __iter__(self):
'''Yield a (path, watch descriptor, event mask) tuple for each
entry being watched.'''
for path, (wd, mask) in self._paths.iteritems():
yield path, wd, mask
def __del__(self):
if self.fd is not None:
os.close(self.fd)
ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
def add_iter(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Yield each added or modified watch descriptor.
To ensure that this method runs to completion, you must
iterate over all of its results, even if you do not care what
they are. For example:
for wd in w.add_iter(path, mask):
pass
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
# Add the IN_ONLYDIR flag to the event mask, to avoid a possible
# race when adding a subdirectory. In the time between the
# event being queued by the kernel and us processing it, the
# directory may have been deleted, or replaced with a different
# kind of entry with the same name.
submask = mask | inotify.IN_ONLYDIR
try:
yield self.add(path, mask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
for d in dirs:
try:
yield self.add(root + '/' + d, submask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
def add_all(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Return a list of added or modified watch descriptors.
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
return [w for w in self.add_iter(path, mask, onerror)]
class autowatcher(watcher):
'''watcher class that automatically watches newly created directories.'''
__slots__ = (
'addfilter',
)
def __init__(self, addfilter=None):
'''Create a new inotify instance.
This instance will automatically watch newly created
directories.
If the optional addfilter parameter is not None, it must be a
callable that takes one parameter. It will be called each time
a directory is about to be automatically watched. If it returns
True, the directory will be watched if it still exists,
otherwise, it will be skipped.'''
super(autowatcher, self).__init__()
self.addfilter = addfilter
_dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
def read(self, bufsize=None):
events = super(autowatcher, self).read(bufsize)
for evt in events:
if evt.mask & self._dir_create_mask == self._dir_create_mask:
if self.addfilter is None or self.addfilter(evt):
parentmask = self._wds[evt.wd][1]
# See note about race avoidance via IN_ONLYDIR above.
mask = parentmask | inotify.IN_ONLYDIR
try:
self.add_all(evt.fullpath, mask)
except OSError, err:
if err.errno not in self.ignored_errors:
raise
return events
class threshold(object):
'''Class that indicates whether a file descriptor has reached a
threshold of readable bytes available.
This class is not thread-safe.'''
__slots__ = (
'fd',
'threshold',
'_iocbuf',
)
def __init__(self, fd, threshold=1024):
self.fd = fd
self.threshold = threshold
self._iocbuf = array.array('i', [0])
def readable(self):
'''Return the number of bytes readable on this file descriptor.'''
fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
return self._iocbuf[0]
def __call__(self):
'''Indicate whether the number of readable bytes has met or
exceeded the threshold.'''
return self.readable() >= self.threshold
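
A sketch of the intended usage pattern for autowatcher, mirroring the imports server.py uses below; the watched path and the one-second poll timeout are placeholders:

import select
import linux as inotify        # re-exports the IN_* constants
from linux import watcher

w = watcher.autowatcher()                   # also watches newly created dirs
w.add_all('/tmp', inotify.IN_ALL_EVENTS)    # recursive watch, errors ignored

poll = select.poll()
poll.register(w.fileno(), select.POLLIN)
while len(w):                               # stop once every watch is gone
    if poll.poll(1000):                     # wait up to one second
        for evt in w.read():
            print('%s %s' % (evt.fullpath, evt.raw))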


@@ -0,0 +1,874 @@
# server.py - inotify status server
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
from mercurial.i18n import _
from mercurial import osutil, util
import common
import errno, os, select, socket, stat, struct, sys, tempfile, time
try:
import linux as inotify
from linux import watcher
except ImportError:
raise
class AlreadyStartedException(Exception): pass
def join(a, b):
if a:
if a[-1] == '/':
return a + b
return a + '/' + b
return b
def split(path):
c = path.rfind('/')
if c == -1:
return '', path
return path[:c], path[c+1:]
walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
def walkrepodirs(dirstate, absroot):
'''Iterate over all subdirectories of this repo.
Exclude the .hg directory, any nested repos, and ignored dirs.'''
def walkit(dirname, top):
fullpath = join(absroot, dirname)
try:
for name, kind in osutil.listdir(fullpath):
if kind == stat.S_IFDIR:
if name == '.hg':
if not top:
return
else:
d = join(dirname, name)
if dirstate._ignore(d):
continue
for subdir in walkit(d, False):
yield subdir
except OSError, err:
if err.errno not in walk_ignored_errors:
raise
yield fullpath
return walkit('', True)
def walk(dirstate, absroot, root):
'''Like os.walk, but only yields regular files.'''
# This function is critical to performance during startup.
def walkit(root, reporoot):
files, dirs = [], []
try:
fullpath = join(absroot, root)
for name, kind in osutil.listdir(fullpath):
if kind == stat.S_IFDIR:
if name == '.hg':
if not reporoot:
return
else:
dirs.append(name)
path = join(root, name)
if dirstate._ignore(path):
continue
for result in walkit(path, False):
yield result
elif kind in (stat.S_IFREG, stat.S_IFLNK):
files.append(name)
yield fullpath, dirs, files
except OSError, err:
if err.errno == errno.ENOTDIR:
# fullpath was a directory, but has since been replaced
# by a file.
yield fullpath, dirs, files
elif err.errno not in walk_ignored_errors:
raise
return walkit(root, root == '')
def _explain_watch_limit(ui, dirstate, rootabs):
path = '/proc/sys/fs/inotify/max_user_watches'
try:
limit = int(file(path).read())
except IOError, err:
if err.errno != errno.ENOENT:
raise
raise util.Abort(_('this system does not seem to '
'support inotify'))
ui.warn(_('*** the current per-user limit on the number '
'of inotify watches is %s\n') % limit)
ui.warn(_('*** this limit is too low to watch every '
'directory in this repository\n'))
ui.warn(_('*** counting directories: '))
ndirs = len(list(walkrepodirs(dirstate, rootabs)))
ui.warn(_('found %d\n') % ndirs)
newlimit = min(limit, 1024)
while newlimit < ((limit + ndirs) * 1.1):
newlimit *= 2
ui.warn(_('*** to raise the limit from %d to %d (run as root):\n') %
(limit, newlimit))
ui.warn(_('*** echo %d > %s\n') % (newlimit, path))
raise util.Abort(_('cannot watch %s until inotify watch limit is raised')
% rootabs)
class pollable(object):
"""
Interface to support polling.
The file descriptor returned by fileno() is registered to a polling
object.
Usage:
Every tick, check if an event has happened since the last tick:
* If yes, call handle_events
* If no, call handle_timeout
"""
poll_events = select.POLLIN
instances = {}
poll = select.poll()
def fileno(self):
raise NotImplementedError
def handle_events(self, events):
raise NotImplementedError
def handle_timeout(self):
raise NotImplementedError
def shutdown(self):
raise NotImplementedError
def register(self, timeout):
fd = self.fileno()
pollable.poll.register(fd, pollable.poll_events)
pollable.instances[fd] = self
self.registered = True
self.timeout = timeout
def unregister(self):
pollable.poll.unregister(self)
self.registered = False
@classmethod
def run(cls):
while True:
timeout = None
timeobj = None
for obj in cls.instances.itervalues():
if obj.timeout is not None and (timeout is None or obj.timeout < timeout):
timeout, timeobj = obj.timeout, obj
try:
events = cls.poll.poll(timeout)
except select.error, err:
if err[0] == errno.EINTR:
continue
raise
if events:
by_fd = {}
for fd, event in events:
by_fd.setdefault(fd, []).append(event)
for fd, events in by_fd.iteritems():
cls.instances[fd].handle_pollevents(events)
elif timeobj:
timeobj.handle_timeout()
def eventaction(code):
"""
Decorator to help handle events in repowatcher
"""
def decorator(f):
def wrapper(self, wpath):
if code == 'm' and wpath in self.lastevent and \
self.lastevent[wpath] in 'cm':
return
self.lastevent[wpath] = code
self.timeout = 250
f(self, wpath)
wrapper.func_name = f.func_name
return wrapper
return decorator
class directory(object):
"""
Representing a directory
* path is the relative path from repo root to this directory
* files is a dict listing the files in this directory
- keys are file names
- values are file status
* dirs is a dict listing the subdirectories
- keys are subdirectory names
- values are directory objects
"""
def __init__(self, relpath=''):
self.path = relpath
self.files = {}
self.dirs = {}
def dir(self, relpath):
"""
Returns the directory contained at the relative path relpath.
Creates the intermediate directories if necessary.
"""
if not relpath:
return self
l = relpath.split('/')
ret = self
while l:
next = l.pop(0)
try:
ret = ret.dirs[next]
except KeyError:
d = directory(join(ret.path, next))
ret.dirs[next] = d
ret = d
return ret
def walk(self, states):
"""
yield (filename, status) pairs for items in the trees
that have status in states.
filenames are relative to the repo root
"""
for file, st in self.files.iteritems():
if st in states:
yield join(self.path, file), st
for dir in self.dirs.itervalues():
for e in dir.walk(states):
yield e
def lookup(self, states, path):
"""
yield root-relative filenames that match path, and whose
status are in states:
* if path is a file, yield path
* if path is a directory, yield directory files
* if path is not tracked, yield nothing
"""
if path[-1] == '/':
path = path[:-1]
paths = path.split('/')
# we need to check separately for last node
last = paths.pop()
tree = self
try:
for dir in paths:
tree = tree.dirs[dir]
except KeyError:
# path is not tracked
return
try:
# if path is a directory, walk it
for file, st in tree.dirs[last].walk(states):
yield file
except KeyError:
try:
if tree.files[last] in states:
# path is a file
yield path
except KeyError:
# path is not tracked
pass
class repowatcher(pollable):
"""
Watches inotify events
"""
statuskeys = 'almr!?'
mask = (
inotify.IN_ATTRIB |
inotify.IN_CREATE |
inotify.IN_DELETE |
inotify.IN_DELETE_SELF |
inotify.IN_MODIFY |
inotify.IN_MOVED_FROM |
inotify.IN_MOVED_TO |
inotify.IN_MOVE_SELF |
inotify.IN_ONLYDIR |
inotify.IN_UNMOUNT |
0)
def __init__(self, ui, dirstate, root):
self.ui = ui
self.dirstate = dirstate
self.wprefix = join(root, '')
self.prefixlen = len(self.wprefix)
try:
self.watcher = watcher.watcher()
except OSError, err:
raise util.Abort(_('inotify service not available: %s') %
err.strerror)
self.threshold = watcher.threshold(self.watcher)
self.fileno = self.watcher.fileno
self.tree = directory()
self.statcache = {}
self.statustrees = dict([(s, directory()) for s in self.statuskeys])
self.last_event = None
self.lastevent = {}
self.register(timeout=None)
self.ds_info = self.dirstate_info()
self.handle_timeout()
self.scan()
def event_time(self):
last = self.last_event
now = time.time()
self.last_event = now
if last is None:
return 'start'
delta = now - last
if delta < 5:
return '+%.3f' % delta
if delta < 50:
return '+%.2f' % delta
return '+%.1f' % delta
def dirstate_info(self):
try:
st = os.lstat(self.wprefix + '.hg/dirstate')
return st.st_mtime, st.st_ino
except OSError, err:
if err.errno != errno.ENOENT:
raise
return 0, 0
def add_watch(self, path, mask):
if not path:
return
if self.watcher.path(path) is None:
if self.ui.debugflag:
self.ui.note(_('watching %r\n') % path[self.prefixlen:])
try:
self.watcher.add(path, mask)
except OSError, err:
if err.errno in (errno.ENOENT, errno.ENOTDIR):
return
if err.errno != errno.ENOSPC:
raise
_explain_watch_limit(self.ui, self.dirstate, self.wprefix)
def setup(self):
self.ui.note(_('watching directories under %r\n') % self.wprefix)
self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE)
self.check_dirstate()
def filestatus(self, fn, st):
try:
type_, mode, size, time = self.dirstate._map[fn][:4]
except KeyError:
type_ = '?'
if type_ == 'n':
st_mode, st_size, st_mtime = st
if size == -1:
return 'l'
if size and (size != st_size or (mode ^ st_mode) & 0100):
return 'm'
if time != int(st_mtime):
return 'l'
return 'n'
if type_ == '?' and self.dirstate._ignore(fn):
return 'i'
return type_
def updatefile(self, wfn, osstat):
'''
update the file entry of an existing file.
osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
'''
self._updatestatus(wfn, self.filestatus(wfn, osstat))
def deletefile(self, wfn, oldstatus):
'''
update the entry of a file which has been deleted.
oldstatus: char in statuskeys, status of the file before deletion
'''
if oldstatus == 'r':
newstatus = 'r'
elif oldstatus in 'almn':
newstatus = '!'
else:
newstatus = None
self.statcache.pop(wfn, None)
self._updatestatus(wfn, newstatus)
def _updatestatus(self, wfn, newstatus):
'''
Update the stored status of a file.
newstatus: - char in (statuskeys + 'ni'), new status to apply.
- or None, to stop tracking wfn
'''
root, fn = split(wfn)
d = self.tree.dir(root)
oldstatus = d.files.get(fn)
# oldstatus can be either:
# - None : fn is new
# - a char in statuskeys: fn is a (tracked) file
if self.ui.debugflag and oldstatus != newstatus:
self.ui.note(_('status: %r %s -> %s\n') %
(wfn, oldstatus, newstatus))
if oldstatus and oldstatus in self.statuskeys \
and oldstatus != newstatus:
del self.statustrees[oldstatus].dir(root).files[fn]
if newstatus in (None, 'i'):
d.files.pop(fn, None)
elif oldstatus != newstatus:
d.files[fn] = newstatus
if newstatus != 'n':
self.statustrees[newstatus].dir(root).files[fn] = newstatus
def check_deleted(self, key):
# Files that had been deleted but were present in the dirstate
# may have vanished from the dirstate; we must clean them up.
nuke = []
for wfn, ignore in self.statustrees[key].walk(key):
if wfn not in self.dirstate:
nuke.append(wfn)
for wfn in nuke:
root, fn = split(wfn)
del self.statustrees[key].dir(root).files[fn]
del self.tree.dir(root).files[fn]
def scan(self, topdir=''):
ds = self.dirstate._map.copy()
self.add_watch(join(self.wprefix, topdir), self.mask)
for root, dirs, files in walk(self.dirstate, self.wprefix, topdir):
for d in dirs:
self.add_watch(join(root, d), self.mask)
wroot = root[self.prefixlen:]
for fn in files:
wfn = join(wroot, fn)
self.updatefile(wfn, self.getstat(wfn))
ds.pop(wfn, None)
wtopdir = topdir
if wtopdir and wtopdir[-1] != '/':
wtopdir += '/'
for wfn, state in ds.iteritems():
if not wfn.startswith(wtopdir):
continue
try:
st = self.stat(wfn)
except OSError:
status = state[0]
self.deletefile(wfn, status)
else:
self.updatefile(wfn, st)
self.check_deleted('!')
self.check_deleted('r')
def check_dirstate(self):
ds_info = self.dirstate_info()
if ds_info == self.ds_info:
return
self.ds_info = ds_info
if not self.ui.debugflag:
self.last_event = None
self.ui.note(_('%s dirstate reload\n') % self.event_time())
self.dirstate.invalidate()
self.handle_timeout()
self.scan()
self.ui.note(_('%s end dirstate reload\n') % self.event_time())
def update_hgignore(self):
# An update of the ignore file can potentially change the
# states of all unknown and ignored files.
# XXX If the user has other ignore files outside the repo, or
# changes their list of ignore files at run time, we'll
# potentially never see changes to them. We could get the
# client to report to us what ignore data they're using.
# But it's easier to do nothing than to open that can of
# worms.
if '_ignore' in self.dirstate.__dict__:
delattr(self.dirstate, '_ignore')
self.ui.note(_('rescanning due to .hgignore change\n'))
self.handle_timeout()
self.scan()
def getstat(self, wpath):
try:
return self.statcache[wpath]
except KeyError:
try:
return self.stat(wpath)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def stat(self, wpath):
try:
st = os.lstat(join(self.wprefix, wpath))
ret = st.st_mode, st.st_size, st.st_mtime
self.statcache[wpath] = ret
return ret
except OSError:
self.statcache.pop(wpath, None)
raise
@eventaction('c')
def created(self, wpath):
if wpath == '.hgignore':
self.update_hgignore()
try:
st = self.stat(wpath)
if stat.S_ISREG(st[0]):
self.updatefile(wpath, st)
except OSError:
pass
@eventaction('m')
def modified(self, wpath):
if wpath == '.hgignore':
self.update_hgignore()
try:
st = self.stat(wpath)
if stat.S_ISREG(st[0]):
if self.dirstate[wpath] in 'lmn':
self.updatefile(wpath, st)
except OSError:
pass
@eventaction('d')
def deleted(self, wpath):
if wpath == '.hgignore':
self.update_hgignore()
elif wpath.startswith('.hg/'):
if wpath == '.hg/wlock':
self.check_dirstate()
return
self.deletefile(wpath, self.dirstate[wpath])
def process_create(self, wpath, evt):
if self.ui.debugflag:
self.ui.note(_('%s event: created %s\n') %
(self.event_time(), wpath))
if evt.mask & inotify.IN_ISDIR:
self.scan(wpath)
else:
self.created(wpath)
def process_delete(self, wpath, evt):
if self.ui.debugflag:
self.ui.note(_('%s event: deleted %s\n') %
(self.event_time(), wpath))
if evt.mask & inotify.IN_ISDIR:
tree = self.tree.dir(wpath)
todelete = [wfn for wfn, ignore in tree.walk('?')]
for fn in todelete:
self.deletefile(fn, '?')
self.scan(wpath)
else:
self.deleted(wpath)
def process_modify(self, wpath, evt):
if self.ui.debugflag:
self.ui.note(_('%s event: modified %s\n') %
(self.event_time(), wpath))
if not (evt.mask & inotify.IN_ISDIR):
self.modified(wpath)
def process_unmount(self, evt):
self.ui.warn(_('filesystem containing %s was unmounted\n') %
evt.fullpath)
sys.exit(0)
def handle_pollevents(self, events):
if self.ui.debugflag:
self.ui.note(_('%s readable: %d bytes\n') %
(self.event_time(), self.threshold.readable()))
if not self.threshold():
if self.registered:
if self.ui.debugflag:
self.ui.note(_('%s below threshold - unhooking\n') %
(self.event_time()))
self.unregister()
self.timeout = 250
else:
self.read_events()
def read_events(self, bufsize=None):
events = self.watcher.read(bufsize)
if self.ui.debugflag:
self.ui.note(_('%s reading %d events\n') %
(self.event_time(), len(events)))
for evt in events:
assert evt.fullpath.startswith(self.wprefix)
wpath = evt.fullpath[self.prefixlen:]
# paths have been normalized, wpath never ends with a '/'
if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR:
# ignore subdirectories of .hg/ (merge, patches...)
continue
if evt.mask & inotify.IN_UNMOUNT:
self.process_unmount(evt)
elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB):
self.process_modify(wpath, evt)
elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF |
inotify.IN_MOVED_FROM):
self.process_delete(wpath, evt)
elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO):
self.process_create(wpath, evt)
self.lastevent.clear()
def handle_timeout(self):
if not self.registered:
if self.ui.debugflag:
self.ui.note(_('%s hooking back up with %d bytes readable\n') %
(self.event_time(), self.threshold.readable()))
self.read_events(0)
self.register(timeout=None)
self.timeout = None
def shutdown(self):
self.watcher.close()
def debug(self):
"""
Returns a sorted list of relative paths currently watched,
for debugging purposes.
"""
return sorted(tuple[0][self.prefixlen:] for tuple in self.watcher)
class server(pollable):
"""
Listens for client queries on unix socket inotify.sock
"""
def __init__(self, ui, root, repowatcher, timeout):
self.ui = ui
self.repowatcher = repowatcher
self.sock = socket.socket(socket.AF_UNIX)
self.sockpath = join(root, '.hg/inotify.sock')
self.realsockpath = None
try:
self.sock.bind(self.sockpath)
except socket.error, err:
if err[0] == errno.EADDRINUSE:
raise AlreadyStartedException(_('could not start server: %s')
% err[1])
if err[0] == "AF_UNIX path too long":
tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
self.realsockpath = os.path.join(tempdir, "inotify.sock")
try:
self.sock.bind(self.realsockpath)
os.symlink(self.realsockpath, self.sockpath)
except (OSError, socket.error), inst:
try:
os.unlink(self.realsockpath)
except:
pass
os.rmdir(tempdir)
if inst.errno == errno.EEXIST:
raise AlreadyStartedException(_('could not start server: %s')
% inst.strerror)
raise
else:
raise
self.sock.listen(5)
self.fileno = self.sock.fileno
self.register(timeout=timeout)
def handle_timeout(self):
pass
def answer_stat_query(self, cs):
names = cs.read().split('\0')
states = names.pop()
self.ui.note(_('answering query for %r\n') % states)
if self.repowatcher.timeout:
# We got a query while a rescan is pending. Make sure we
# rescan before responding, or we could give back a wrong
# answer.
self.repowatcher.handle_timeout()
if not names:
def genresult(states, tree):
for fn, state in tree.walk(states):
yield fn
else:
def genresult(states, tree):
for fn in names:
for f in tree.lookup(states, fn):
yield f
return ['\0'.join(r) for r in [
genresult('l', self.repowatcher.statustrees['l']),
genresult('m', self.repowatcher.statustrees['m']),
genresult('a', self.repowatcher.statustrees['a']),
genresult('r', self.repowatcher.statustrees['r']),
genresult('!', self.repowatcher.statustrees['!']),
'?' in states
and genresult('?', self.repowatcher.statustrees['?'])
or [],
[],
'c' in states and genresult('n', self.repowatcher.tree) or [],
]]
def answer_dbug_query(self):
return ['\0'.join(self.repowatcher.debug())]
def handle_pollevents(self, events):
for e in events:
self.handle_pollevent()
def handle_pollevent(self):
sock, addr = self.sock.accept()
cs = common.recvcs(sock)
version = ord(cs.read(1))
if version != common.version:
self.ui.warn(_('received query from incompatible client '
'version %d\n') % version)
try:
# try to send back our version to the client
# this way, the client too is informed of the mismatch
sock.sendall(chr(common.version))
except:
pass
return
type = cs.read(4)
if type == 'STAT':
results = self.answer_stat_query(cs)
elif type == 'DBUG':
results = self.answer_dbug_query()
else:
self.ui.warn(_('unrecognized query type: %s\n') % type)
return
try:
try:
v = chr(common.version)
sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
*map(len, results)))
sock.sendall(''.join(results))
finally:
sock.shutdown(socket.SHUT_WR)
except socket.error, err:
if err[0] != errno.EPIPE:
raise
def shutdown(self):
self.sock.close()
try:
os.unlink(self.sockpath)
if self.realsockpath:
os.unlink(self.realsockpath)
os.rmdir(os.path.dirname(self.realsockpath))
except OSError, err:
if err.errno != errno.ENOENT:
raise
class master(object):
def __init__(self, ui, dirstate, root, timeout=None):
self.ui = ui
self.repowatcher = repowatcher(ui, dirstate, root)
self.server = server(ui, root, self.repowatcher, timeout)
def shutdown(self):
for obj in pollable.instances.itervalues():
obj.shutdown()
def run(self):
self.repowatcher.setup()
self.ui.note(_('finished setup\n'))
if os.getenv('TIME_STARTUP'):
sys.exit(0)
pollable.run()
def start(ui, dirstate, root):
def closefds(ignore):
# (from python bug #1177468)
# close all inherited file descriptors
# Python 2.4.1 and later use /dev/urandom to seed the random module's RNG
# a file descriptor is kept internally as os._urandomfd (created on demand
# the first time os.urandom() is called), and should not be closed
try:
os.urandom(4)
urandom_fd = getattr(os, '_urandomfd', None)
except AttributeError:
urandom_fd = None
ignore.append(urandom_fd)
for fd in range(3, 256):
if fd in ignore:
continue
try:
os.close(fd)
except OSError:
pass
m = master(ui, dirstate, root)
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid:
return pid
closefds(pollable.instances.keys())
os.setsid()
fd = os.open('/dev/null', os.O_RDONLY)
os.dup2(fd, 0)
if fd > 0:
os.close(fd)
fd = os.open(ui.config('inotify', 'log', '/dev/null'),
os.O_RDWR | os.O_CREAT | os.O_TRUNC)
os.dup2(fd, 1)
os.dup2(fd, 2)
if fd > 2:
os.close(fd)
try:
m.run()
finally:
m.shutdown()
os._exit(0)
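
For completeness, the client module shown earlier restarts the daemon through this start() function. A sketch of driving it by hand; ui.ui() and hg.repository() are the Mercurial entry points of this era, and the repository path is a placeholder:

from mercurial import ui as uimod, hg
import server
from client import client

u = uimod.ui()
repo = hg.repository(u, '/path/to/repo')            # placeholder path

# master() binds .hg/inotify.sock before forking, so the parent can
# connect as soon as start() returns the child's pid
pid = server.start(u, repo.dirstate, repo.root)
u.write('inotify server forked as pid %d\n' % pid)

cli = client(u, repo)
for path in cli.debugquery():
    u.write(' watching %s/\n' % path)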