#!bin/python
"""
Collector receives device attrs from multiple senders, combines
them, and sends output attrs to hardware. The combining part has
custom code for some attributes.
Input can be over http or zmq.
"""
from run_local import log
from rdflib import URIRef, Literal
from twisted.internet import reactor, utils
from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection
import json
import logging
import optparse
import time
import traceback
import cyclone.web
import cyclone.websocket
from greplin import scales

from cycloneerr import PrettyErrorHandler
from light9.collector.output import EnttecDmx, Udmx, DummyOutput
from light9.collector.collector import Collector
from light9.namespaces import L9
from light9 import networking
from rdfdb.syncedgraph import SyncedGraph
from light9.greplin_cyclone import StatsForCyclone
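

# Message body, as read by parseJsonMessage() below:
#   {"client": ..., "clientSession": ..., "sendTime": ...,
#    "settings": [[<device uri>, <attr uri>, <value>], ...]}
# String values starting with 'http' are parsed as URIRefs; everything
# else becomes a Literal.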
def parseJsonMessage(msg):
    body = json.loads(msg)
    settings = []
    for device, attr, value in body['settings']:
        if isinstance(value, str) and value.startswith('http'):
            value = URIRef(value)
        else:
            value = Literal(value)
        settings.append((URIRef(device), URIRef(attr), value))
    return body['client'], body['clientSession'], settings, body['sendTime']


def startZmq(port, collector):
    stats = scales.collection('/zmqServer', scales.PmfStat('setAttr'))

    zf = ZmqFactory()
    addr = 'tcp://*:%s' % port
    log.info('creating zmq endpoint at %r', addr)
    e = ZmqEndpoint('bind', addr)

    class Pull(ZmqPullConnection):
        #highWaterMark = 3

        def onPull(self, message):
            with stats.setAttr.time():
                # todo: new compressed protocol where you send all URIs up
                # front and then use small ints to refer to devices and
                # attributes in subsequent requests.
                client, clientSession, settings, sendTime = parseJsonMessage(
                    message[0])
                collector.setAttrs(client, clientSession, settings, sendTime)

    s = Pull(zf, e)
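

# Fans out computed output attrs to connected websocket clients.
# _flush runs at most every 50ms, and each client's `seen` dict is used
# to skip devices whose attrs haven't changed since the last send.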
class WebListeners(object):

    def __init__(self):
        self.clients = []
        self.pendingMessageForDev = {}  # dev: (attrs, outputmap)
        self.lastFlush = 0

    def addClient(self, client):
        self.clients.append([client, {}])  # seen = {dev: attrs}
        log.info('added client %s %s', len(self.clients), client)

    def delClient(self, client):
        self.clients = [[c, t] for c, t in self.clients if c != client]
        log.info('delClient %s, %s left', client, len(self.clients))

    def outputAttrsSet(self, dev, attrs, outputMap):
        """called often- don't be slow"""
        self.pendingMessageForDev[dev] = (attrs, outputMap)
        try:
            self._flush()
        except Exception:
            traceback.print_exc()
            raise

    def _flush(self):
        now = time.time()
        if now < self.lastFlush + .05 or not self.clients:
            return
        self.lastFlush = now

        while self.pendingMessageForDev:
            dev, (attrs, outputMap) = self.pendingMessageForDev.popitem()
            msg = None  # lazy, since makeMsg is slow

            # this omits repeats, but can still send many
            # messages/sec. Not sure if piling up messages for the browser
            # could lead to slowdowns in the real dmx output.
            for client, seen in self.clients:
                if seen.get(dev) == attrs:
                    continue
                if msg is None:
                    msg = self.makeMsg(dev, attrs, outputMap)
                seen[dev] = attrs
                client.sendMessage(msg)
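
    # Message sent to browsers, as built by makeMsg() below:
    #   {"outputAttrsSet": {"dev": <device uri>,
    #                       "attrs": [{"attr": ..., "val": ...,
    #                                  "chan": "<output shortId> <1-based index>"}]}}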
    def makeMsg(self, dev, attrs, outputMap):
        attrRows = []
        for attr, val in list(attrs.items()):
            output, index = outputMap[(dev, attr)]
            attrRows.append({
                'attr': attr.rsplit('/')[-1],
                'val': val,
                'chan': (output.shortId(), index + 1)
            })
        attrRows.sort(key=lambda r: r['chan'])
        for row in attrRows:
            row['chan'] = '%s %s' % (row['chan'][0], row['chan'][1])

        msg = json.dumps({'outputAttrsSet': {
            'dev': dev,
            'attrs': attrRows
        }}, sort_keys=True)
        return msg
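

# One Updates socket per connected browser; clients are registered with
# WebListeners for the life of the connection.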
class Updates(cyclone.websocket.WebSocketHandler):

    def connectionMade(self, *args, **kwargs):
        log.info('socket connect %s', self)
        self.settings.listeners.addClient(self)

    def connectionLost(self, reason):
        self.settings.listeners.delClient(self)

    def messageReceived(self, message):
        # Clients aren't expected to send anything; the parsed result is
        # discarded, so this just raises on non-JSON input.
        json.loads(message)


stats = scales.collection('/webServer', scales.PmfStat('setAttr'))


class Attrs(PrettyErrorHandler, cyclone.web.RequestHandler):

    def put(self):
        with stats.setAttr.time():
            client, clientSession, settings, sendTime = parseJsonMessage(
                self.request.body)
            self.settings.collector.setAttrs(client, clientSession, settings,
                                             sendTime)
            self.set_status(202)
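

# A hypothetical client request (example URIs and values; the real port
# comes from networking.collector and the URIs from the graph):
#   curl -X PUT http://localhost:PORT/attrs -d '{"client": "demo",
#     "clientSession": "s1", "sendTime": 1234567890.0,
#     "settings": [["http://example/dev1", "http://example/color", "#ff0000"]]}'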
def launch(graph, doLoadTest=False):
    try:
        # todo: drive outputs with config files
        outputs = [
            # EnttecDmx(L9['output/dmxA/'], '/dev/dmx3', 80),
            Udmx(L9['output/dmxA/'], bus=5, numChannels=80),
            #DummyOutput(L9['output/dmxA/'], 80),
            Udmx(L9['output/dmxB/'], bus=7, numChannels=500),
        ]
    except Exception:
        log.error("setting up outputs:")
        traceback.print_exc()
        raise

    listeners = WebListeners()
    c = Collector(graph, outputs, listeners)
    startZmq(networking.collectorZmq.port, c)

    reactor.listenTCP(networking.collector.port,
                      cyclone.web.Application(
                          handlers=[
                              (r'/()', cyclone.web.StaticFileHandler, {
                                  "path": "light9/collector/web",
                                  "default_filename": "index.html"
                              }),
                              (r'/updates', Updates),
                              (r'/attrs', Attrs),
                              (r'/stats', StatsForCyclone),
                          ],
                          collector=c,
                          listeners=listeners),
                      interface='::')
    log.info('serving http on %s, zmq on %s', networking.collector.port,
             networking.collectorZmq.port)

    if doLoadTest:
        # in a subprocess since we don't want this client to be
        # cooperating with the main event loop and only sending
        # requests when there's free time
        def afterWarmup():
            log.info('running collector_loadtest')
            d = utils.getProcessValue('bin/python',
                                      ['bin/collector_loadtest.py'])

            def done(*a):
                log.info('loadtest done')
                reactor.stop()

            d.addCallback(done)

        reactor.callLater(2, afterWarmup)


def main():
    parser = optparse.OptionParser()
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="logging.DEBUG")
    parser.add_option("--loadtest",
                      action="store_true",
                      help="call myself with some synthetic load then exit")
    (options, args) = parser.parse_args()

    log.setLevel(logging.DEBUG if options.verbose else logging.INFO)
    logging.getLogger('colormath').setLevel(logging.INFO)

    graph = SyncedGraph(networking.rdfdb.url, "collector")
    graph.initiallySynced.addCallback(
        lambda _: launch(graph, options.loadtest)).addErrback(
            lambda e: reactor.crash())
    reactor.run()


if __name__ == '__main__':
    main()