#!/usr/bin/env python
"""
other tools POST themselves to here as subscribers to the graph. They
are providing a URL we can PUT to with graph updates.

we immediately PUT them back all the contents of the graph as a bunch
of adds. later we PUT them back with patches (del/add lists) when
there are changes.

If we fail to reach a registered caller, we forget about it for
future calls. We could PUT empty diffs as a heartbeat to notice
disappearing callers faster.

A caller can submit a patch which we'll persist and broadcast to
every other client.

Global data undo should probably happen within this service. Some
operations should not support undo, such as updating the default
position of a window. How will we separate those? A blacklist of
subj+pred pairs that don't save undo? Or just save the updates like
everything else, but when you press undo, there's a way to tell which
updates *should* be part of your app's undo system?

Maybe some subgraphs are for transient data (e.g. current timecode,
mouse position in curvecalc) that only some listeners want to hear
about.

Deletes are graph-specific, so callers may be surprised to delete a
stmt from one graph but then find that statement is still true.

Alternate plan: would it help to insist that every patch is within
only one subgraph? I think it's ok for them to span multiple ones.

Inserts can be made on any subgraphs, and each subgraph is saved in
its own file. The file might not be in a format that can express
graphs, so I'm just going to not store the subgraph URI in any file.

I don't support wildcard deletes, and there are race conditions where
an s-p could end up with unexpected multiple objects. Every client
needs to be ready for this.

We watch the files and push their own changes back to the clients.

Persist our client list, to survive restarts. In another rdf file? A
random json one? memcache? Also hold the recent changes. We're not
logging everything forever, though, since the output files and a VCS
shall be used for that.

Bnodes: this rdfdb graph might be able to track bnodes correctly, and
they make for more compact n3 files. I'm not sure if it's going to be
hard to keep the client bnodes in sync though. File rereads would be
hard, if ever a bnode was used across graphs, so that probably should
not be allowed.

Our API:

 GET /             ui
 GET /graph        the whole graph, or a query from it (needed? just
                   for ui browsing?)
 PUT /patches      clients submit changes
 GET /patches      (recent) patches from clients
 POST /graphClients  clientUpdate={uri} to subscribe
 GET /graphClients   current clients

format:
 json {"adds" : [[quads]...],
       "deletes": [[quads]],
       "senderUpdateUri" : tooluri,
       "created": tttt  // maybe to help resolve some conflicts
      }
 maybe use some http://json-ld.org/ in there.

proposed rule feature:
 rdfdb should be able to watch a pair of (sourceFile, rulesFile) and
 rerun the rules when either one changes. Should the sourceFile be
 able to specify its own rules file? That would be easier
 configuration. How do edits work? Not allowed? Patch the source
 only? Also see the source graph loaded into a different ctx, and you
 can edit that one and see the results in the output context?

Our web ui:

 sections:

 registered clients

 recent patches, each one says what client it came from. You can
 reverse them here. We should be able to take patches that are close
 in time and keep updating the same data (e.g. a stream of changes as
 the user drags a slider) and collapse them into a single edit for
 clarity.

 Ways to display patches, using labels and creator/subj icons where
 possible: "set <subj>'s <pred> to <obj>", "changed <subj>'s <pred>
 from <old obj> to <new obj>", "added <obj> to <subj>'s <pred>".

 raw messages for debugging this client

 ctx urls take you to-> files, which are dirty, have we seen external
 changes, notice big files that are taking a long time to save

 graph contents. plain rdf browser like an outliner or something.
 clicking any resource from the other displays takes you to this,
 focused on that resource
"""
from twisted.internet import reactor
import twisted.internet.error
from twisted.python.filepath import FilePath
from twisted.internet.inotify import humanReadableMask, IN_CREATE
import sys, optparse, logging, json, os
import cyclone.web, cyclone.httpclient, cyclone.websocket
sys.path.append(".")
from light9 import networking, showconfig, prof
from rdflib import ConjunctiveGraph, URIRef, Graph
from light9.rdfdb.graphfile import GraphFile
from light9.rdfdb.patch import Patch, ALLSTMTS
from light9.rdfdb.rdflibpatch import patchQuads
from light9.rdfdb.file_vs_uri import correctToTopdirPrefix, fileForUri, uriFromFile
from light9.rdfdb.patchsender import sendPatch
from light9.rdfdb.patchreceiver import makePatchEndpointPutMethod
from twisted.internet.inotify import INotify

from run_local import log
log.setLevel(logging.DEBUG)
from lib.cycloneerr import PrettyErrorHandler


class Client(object):
    """
    one of our syncedgraph clients
    """
    def __init__(self, updateUri, label, db):
        self.db = db
        self.label = label
        self.updateUri = updateUri
        # send the full graph right away so the new client starts in sync
        self.sendAll()

    def __repr__(self):
        return "<%s client at %s>" % (self.label, self.updateUri)

    def sendAll(self):
        """send the client the whole graph contents"""
        log.info("sending all graphs to %s at %s" %
                 (self.label, self.updateUri))
        self.sendPatch(Patch(
            addQuads=self.db.graph.quads(ALLSTMTS),
            delQuads=[]))

    def sendPatch(self, p):
        # returns a Deferred from the HTTP PUT to the client's update URI
        return sendPatch(self.updateUri, p)


class WatchedFiles(object):
    """
    find files, notice new files.

    This object watches directories. Each GraphFile watches its own
    file.
    """
    def __init__(self, dirUriMap, patch, getSubgraph):
        self.dirUriMap = dirUriMap  # {abspath : uri prefix}
        self.patch, self.getSubgraph = patch, getSubgraph

        self.graphFiles = {}  # context uri : GraphFile

        self.notifier = INotify()
        self.notifier.startReading()

        self.findAndLoadFiles()

    def findAndLoadFiles(self):
        # initialLoad flags the startup scan so other code can tell
        # these reads apart from later external file edits
        self.initialLoad = True
        try:
            for topdir in self.dirUriMap:
                for dirpath, dirnames, filenames in os.walk(topdir):
                    for base in filenames:
                        self.watchFile(os.path.join(dirpath, base))
                    # watch each directory so newly created files get
                    # picked up via dirChange
                    self.notifier.watch(FilePath(dirpath), autoAdd=True,
                                        callbacks=[self.dirChange])
        finally:
            self.initialLoad = False

    def dirChange(self, watch, path, mask):
        if mask & IN_CREATE:
            log.debug("%s created; consider adding a watch", path)
            self.watchFile(path.path)

    def watchFile(self, inFile):
        """
        consider adding a GraphFile to self.graphFiles

        inFile needs to be a relative path, not an absolute
        (e.g. in a FilePath) because we use its exact relative
        form in the context URI
        """
        if not os.path.isfile(inFile):
            return

        inFile = correctToTopdirPrefix(self.dirUriMap, inFile)
        if os.path.splitext(inFile)[1] not in ['.n3']:
            return

        # an n3 file with rules makes it all the way past this reading
        # and the serialization. Then, on the receiving side, a
        # SyncedGraph calls graphFromNQuad on the incoming data and
        # has a parse error. I'm not sure where this should be fixed
        # yet.
        if '-rules' in inFile:
            return

        # for legacy versions, compile all the config stuff you want
        # read into one file called config.n3. New versions won't read
        # it.
        if inFile.endswith("config.n3"):
            return

        ctx = uriFromFile(self.dirUriMap, inFile)
        gf = GraphFile(self.notifier, inFile, ctx,
                       self.patch, self.getSubgraph)
        self.graphFiles[ctx] = gf
        log.info("%s do initial read", inFile)
        gf.reread()

    def aboutToPatch(self, ctx):
        """
        warn us that a patch is about to come to this context. it's
        more straightforward to create the new file now

        this is meant to make the file before we add triples, so we
        wouldn't see the blank file and lose those triples. But it
        didn't work, so there are other measures that make us not lose
        the triples from a new file. Calling this before patching the
        graph is still a reasonable thing to do, though.
        """
        g = self.getSubgraph(ctx)

        if ctx not in self.graphFiles:
            outFile = fileForUri(self.dirUriMap, ctx)
            log.info("starting new file %r", outFile)
            self.graphFiles[ctx] = GraphFile(self.notifier, outFile, ctx,
                                             self.patch, self.getSubgraph)

    def dirtyFiles(self, ctxs):
        """mark dirty the files that we watch in these contexts.

        the ctx might not be a file that we already read; it might be
        for a new file we have to create, or it might be for a
        transient context that we're not going to save

        if it's a ctx with no file, error
        """
        for ctx in ctxs:
            g = self.getSubgraph(ctx)
            self.graphFiles[ctx].dirty(g)


class Db(object):
    """
    the master graph, all the connected clients, all the files we're
    watching
    """
    def __init__(self, dirUriMap):
        self.clients = []
        self.graph = ConjunctiveGraph()

        self.watchedFiles = WatchedFiles(dirUriMap,
                                         self.patch, self.getSubgraph)

        self.summarizeToLog()

    def patch(self, p, dueToFileChange=False):
        """
        apply this patch to the master graph then notify everyone about it

        dueToFileChange if this is a patch describing an edit we read
        *from* the file (such that we shouldn't write it back to the
        file)

        if p has a senderUpdateUri attribute, we won't send this patch
        back to the sender with that updateUri
        """
        ctx = p.getContext()
        log.info("patching graph %s -%d +%d" % (
            ctx, len(p.delQuads), len(p.addQuads)))

        if hasattr(self, 'watchedFiles'):  # not available during startup
            self.watchedFiles.aboutToPatch(ctx)

        # perfect=True: refuse the patch if its deletes/adds don't
        # exactly match the current graph state
        patchQuads(self.graph, p.delQuads, p.addQuads, perfect=True)
        self._sendPatch(p)
        if not dueToFileChange:
            self.watchedFiles.dirtyFiles([ctx])
        sendToLiveClients(asJson=p.jsonRepr)

    def _sendPatch(self, p):
        senderUpdateUri = getattr(p, 'senderUpdateUri', None)

        for c in self.clients:
            if c.updateUri == senderUpdateUri:
                # this client has self-applied the patch already
                continue
            d = c.sendPatch(p)
            d.addErrback(self.clientErrored, c)

    def clientErrored(self, err, c):
        err.trap(twisted.internet.error.ConnectError)
        log.info("connection error- dropping client %r" % c)
        self.clients.remove(c)
        self.sendClientsToAllLivePages()

    def summarizeToLog(self):
        log.info("contexts in graph (%s total stmts):" % len(self.graph))
        for c in self.graph.contexts():
            log.info("  %s: %s statements" %
                     (c.identifier, len(self.getSubgraph(c.identifier))))

    def getSubgraph(self, uri):
        """
        this is meant to return a live view of the given subgraph, but
        if i'm still working around an rdflib bug, it might return a
        copy

        and it's returning triples, but I think quads would be better
        """
        # this is returning an empty Graph :(
        #return self.graph.get_context(uri)

        g = Graph()
        for s in self.graph.triples(ALLSTMTS, uri):
            g.add(s)
        return g

    def addClient(self, updateUri, label):
        # drop any stale registration with the same updateUri. (The
        # old code removed from self.clients while iterating it, which
        # can skip elements; rebuild the list instead.)
        self.clients = [c for c in self.clients
                        if c.updateUri != updateUri]

        log.info("new client %s at %s" % (label, updateUri))
        self.clients.append(Client(updateUri, label, self))
        self.sendClientsToAllLivePages()

    def sendClientsToAllLivePages(self):
        sendToLiveClients({"clients": [
            dict(updateUri=c.updateUri, label=c.label)
            for c in self.clients]})


class GraphResource(PrettyErrorHandler, cyclone.web.RequestHandler):
    def get(self):
        # pick a serialization based on the Accept header; n3 is the
        # default. ('fmt' rather than 'format' to avoid shadowing the
        # builtin.)
        accept = self.request.headers.get('accept', '')
        fmt = 'n3'
        if accept == 'text/plain':
            fmt = 'nt'
        elif accept == 'application/n-quads':
            fmt = 'nquads'
        self.write(self.settings.db.graph.serialize(format=fmt))


class Patches(PrettyErrorHandler, cyclone.web.RequestHandler):
    def __init__(self, *args, **kw):
        cyclone.web.RequestHandler.__init__(self, *args, **kw)
        # PUT handling is delegated to the shared patch-endpoint helper
        p = makePatchEndpointPutMethod(self.settings.db.patch)
        self.put = lambda: p(self)

    def get(self):
        pass


class GraphClients(PrettyErrorHandler, cyclone.web.RequestHandler):
    def get(self):
        pass

    def post(self):
        upd = self.get_argument("clientUpdate")
        try:
            self.settings.db.addClient(upd, self.get_argument("label"))
        except Exception:
            import traceback
            traceback.print_exc()
            raise


liveClients = set()


def sendToLiveClients(d=None, asJson=None):
    # callers pass either a json-serializable dict (d) or an
    # already-serialized string (asJson)
    j = asJson or json.dumps(d)
    for c in liveClients:
        c.sendMessage(j)


class Live(cyclone.websocket.WebSocketHandler):
    def connectionMade(self, *args, **kwargs):
        log.info("websocket opened")
        liveClients.add(self)
        self.settings.db.sendClientsToAllLivePages()

    def connectionLost(self, reason):
        log.info("websocket closed")
        liveClients.remove(self)

    def messageReceived(self, message):
        log.info("got message %s" % message)
        self.sendMessage(message)


class NoExts(cyclone.web.StaticFileHandler):
    # .xhtml pages can be get() without .xhtml on them
    def get(self, path, *args, **kw):
        if path and '.' not in path:
            path = path + ".xhtml"
        cyclone.web.StaticFileHandler.get(self, path, *args, **kw)


if __name__ == "__main__":
    logging.basicConfig()
    log = logging.getLogger()

    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbose", action="store_true",
                      help="logging.DEBUG")
    (options, args) = parser.parse_args()

    log.setLevel(logging.DEBUG if options.verbose else logging.INFO)

    db = Db(dirUriMap={os.environ['LIGHT9_SHOW'].rstrip('/') + '/':
                       showconfig.showUri()})

    from twisted.python import log as twlog
    twlog.startLogging(sys.stdout)

    reactor.listenTCP(networking.rdfdb.port, cyclone.web.Application(handlers=[
        (r'/live', Live),
        (r'/graph', GraphResource),
        (r'/patches', Patches),
        (r'/graphClients', GraphClients),
        (r'/(.*)', NoExts,
         {"path": "light9/rdfdb/web",
          "default_filename": "index.xhtml"}),
        ], debug=True, db=db))
    log.info("serving on %s" % networking.rdfdb.port)
    prof.run(reactor.run, profile=None)