changeset 1545:c7e52977c5ca

DeviceSettings is used in more places Ignore-this: 3cf85ef50ddd807c89e9af53832e9d5c
author Drew Perttula <drewp@bigasterisk.com>
date Fri, 19 May 2017 07:42:27 +0000
parents fc5675f5b756
children eeadad4a998e
files light9/effect/sequencer.py light9/effect/settings.py light9/effect/settings_test.py light9/paint/solve.py light9/paint/solve_test.py
diffstat 5 files changed, 210 insertions(+), 91 deletions(-) [+]
line wrap: on
line diff
--- a/light9/effect/sequencer.py	Fri May 19 07:35:29 2017 +0000
+++ b/light9/effect/sequencer.py	Fri May 19 07:42:27 2017 +0000
@@ -17,6 +17,8 @@
 from light9.namespaces import L9, RDF
 from light9.vidref.musictime import MusicTime
 from light9.effect import effecteval
+from light9.effect.settings import DeviceSettings
+
 from greplin import scales
 from txzmq import ZmqEndpoint, ZmqFactory, ZmqPushConnection
 
@@ -38,7 +40,7 @@
 
 
 def toCollectorJson(client, session, settings):
-    return json.dumps({'settings': settings,
+    return json.dumps({'settings': settings.asList() if isinstance(settings, DeviceSettings) else settings,
                        'client': client,
                        'clientSession': session,
                        'sendTime': time.time(),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/light9/effect/settings.py	Fri May 19 07:42:27 2017 +0000
@@ -0,0 +1,160 @@
+"""
+Data structure and converters for a table of (device,attr,value)
+rows. These might be effect attrs ('strength'), device attrs ('rx'),
+or output attrs (dmx channel).
+"""
+import decimal
+from rdflib import URIRef, Literal
+from light9.namespaces import RDF, L9, DEV
+from light9.rdfdb.patch import Patch
+
+
+def getVal(graph, subj):
+    lit = graph.value(subj, L9['value']) or graph.value(subj, L9['scaledValue'])
+    ret = lit.toPython()
+    if isinstance(ret, decimal.Decimal):
+        ret = float(ret)
+    return ret
+
+class _Settings(object):
+    def __init__(self, graph, settingsList):
+        self.graph = graph # for looking up all possible attrs
+        self._compiled = {} # dev: { attr: val }
+        for row in settingsList:
+            self._compiled.setdefault(row[0], {})[row[1]] = row[2]
+        # self._compiled may not be final yet -- see _fromCompiled
+
+    def __hash__(self):
+        itemed = tuple([(d, tuple([(a, v) for a, v in sorted(av.items())]))
+                        for d, av in sorted(self._compiled.items())])
+        return hash(itemed)
+
+    def __eq__(self, other):
+        if not issubclass(other.__class__, self.__class__):
+            raise TypeError("can't compare %r to %r" % (self.__class__, other.__class__))
+        return self._compiled == other._compiled
+
+    def __ne__(self, other):
+        return not self == other
+
+
+    def __repr__(self):
+        words = []
+        def accum():
+            for dev, av in self._compiled.iteritems():
+                for attr, val in av.iteritems():
+                    words.append('%s.%s=%g' % (dev.rsplit('/')[-1],
+                                               attr.rsplit('/')[-1],
+                                               val))
+                    if len(words) > 5:
+                        words.append('...')
+                        return
+        accum()
+        return '<%s %s>' % (self.__class__.__name__, ' '.join(words))
+        
+    def getValue(self, dev, attr):
+        return self._compiled.get(dev, {}).get(attr, 0)
+        
+    @classmethod
+    def _fromCompiled(cls, graph, compiled):
+        obj = cls(graph, [])
+        obj._compiled = compiled
+        return obj
+            
+    @classmethod
+    def fromResource(cls, graph, subj):
+        settingsList = []
+        with graph.currentState() as g:
+            for s in g.objects(subj, L9['setting']):
+                d = g.value(s, L9['device'])
+                da = g.value(s, L9['deviceAttr'])
+                v = getVal(g, s)
+                settingsList.append((d, da, v))
+        return cls(graph, settingsList)
+
+    @classmethod
+    def fromVector(cls, graph, vector):
+        compiled = {}
+        for (d, a), v in zip(cls(graph, [])._vectorKeys(), vector):
+            compiled.setdefault(d, {})[a] = v
+        return cls._fromCompiled(graph, compiled)
+
+    def _vectorKeys(self):
+        """stable order of all the dev,attr pairs for this type of settings"""
+        raise NotImplementedError
+
+    def asList(self):
+        """old style list of (dev, attr, val) tuples"""
+        out = []
+        for dev, av in self._compiled.iteritems():
+            for attr, val in av.iteritems():
+                out.append((dev, attr, val))
+        return out
+
+    def devices(self):
+        return self._compiled.keys()
+        
+    def toVector(self):
+        out = []
+        for dev, attr in self._vectorKeys():
+            out.append(self._compiled.get(dev, {}).get(attr, 0))
+        return out
+
+    def byDevice(self):
+        for dev, av in self._compiled.iteritems():
+            yield dev, self.__class__._fromCompiled(self.graph, {dev: av})
+
+    def ofDevice(self, dev):
+        return self.__class__._fromCompiled(self.graph,
+                                            {dev: self._compiled.get(dev, {})})
+        
+    def distanceTo(self, other):
+        raise NotImplementedError
+        dist = 0
+        for key in set(attrs1).union(set(attrs2)):
+            if key not in attrs1 or key not in attrs2:
+                dist += 999
+            else:
+                dist += abs(attrs1[key] - attrs2[key])
+        return dist
+
+    def addStatements(self, subj, ctx, settingRoot, settingsSubgraphCache):
+        """
+        settingRoot can be shared across images (or even wider if you want)
+        """
+        # ported from live.coffee
+        add = []
+        for i, (dev, attr, val) in enumerate(self.asList()):
+            # hopefully a unique number for the setting so repeated settings converge
+            settingHash = hash((dev, attr, val)) % 9999999
+            setting = URIRef('%sset%s' % (settingRoot, settingHash))
+            add.append((subj, L9['setting'], setting, ctx))
+            if setting in settingsSubgraphCache:              
+                continue
+                
+            scaledAttributeTypes = [L9['color'], L9['brightness'], L9['uv']]
+            settingType = L9['scaledValue'] if attr in scaledAttributeTypes else L9['value']
+            add.extend([
+                (setting, L9['device'], dev, ctx),
+                (setting, L9['deviceAttr'], attr, ctx),
+                (setting, settingType, Literal(val), ctx),
+                ])
+            settingsSubgraphCache.add(setting)
+            
+        self.graph.patch(Patch(addQuads=add))
+
+
+class DeviceSettings(_Settings):
+    def _vectorKeys(self):
+        with self.graph.currentState() as g:
+            devs = set() # devclass, dev
+            for dc in g.subjects(RDF.type, L9['DeviceClass']):
+                for dev in g.subjects(RDF.type, dc):
+                    devs.add((dc, dev))
+
+            keys = []
+            for dc, dev in sorted(devs):
+                for attr in sorted(g.objects(dc, L9['deviceAttr'])):
+                    keys.append((dev, attr))
+        return keys
+    
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/light9/effect/settings_test.py	Fri May 19 07:42:27 2017 +0000
@@ -0,0 +1,13 @@
+import unittest
+from light9.rdfdb.localsyncedgraph import LocalSyncedGraph
+from light9.namespaces import RDF, L9, DEV
+from light9.effect.settings import DeviceSettings
+
+class TestDeviceSettings(unittest.TestCase):
+    def setUp(self):
+        self.graph = LocalSyncedGraph(files=['show/dance2017/cam/test/lightConfig.n3',
+                                             'show/dance2017/cam/test/bg.n3'])
+
+    def testToVectorZero(self):
+        ds = DeviceSettings(self.graph, [])
+        self.assertEqual([0] * 20, ds.toVector())
--- a/light9/paint/solve.py	Fri May 19 07:35:29 2017 +0000
+++ b/light9/paint/solve.py	Fri May 19 07:42:27 2017 +0000
@@ -5,6 +5,8 @@
 import scipy.misc, scipy.ndimage, scipy.optimize
 import cairo
 
+from light9.effect.settings import DeviceSettings
+
 # numpy images in this file are (x, y, c) layout.
 
 def numpyFromCairo(surface):
@@ -45,40 +47,14 @@
 def brightest(img):
     return numpy.amax(img, axis=(0, 1))
 
-def getVal(graph, subj):
-    lit = graph.value(subj, L9['value']) or graph.value(subj, L9['scaledValue'])
-    ret = lit.toPython()
-    if isinstance(ret, decimal.Decimal):
-        ret = float(ret)
-    return ret
 
-def loadNumpy(path, thumb=(100, 100)):
-    img = Image.open(path)
-    img.thumbnail(thumb)
-    return numpyFromPil(img)
-
-
-class Settings(object):
-    def __init__(self, graph, settingsList):
-        self._compiled = {} # dev: { attr: val }
-        for row in settingsList:
-            self._compiled.setdefault(row[0], {})[row[1]] = row[2]
-
-    def toVector(self):
-        """
-        
-    def fromVector(cls, graph, vector):
-        """update our settings from a vector with the same ordering as toVector would make"""
-    def distanceTo(self, other):
-        
-    
 class Solver(object):
     def __init__(self, graph):
         self.graph = graph
         self.samples = {} # uri: Image array
         self.fromPath = {} # basename: image array
         self.blurredSamples = {}
-        self.sampleSettings = {} # (uri, path): { dev: { attr: val } }
+        self.sampleSettings = {} # (uri, path): DeviceSettings
         
     def loadSamples(self):
         """learn what lights do from images"""
@@ -89,13 +65,9 @@
                 path = 'show/dance2017/cam/test/%s' % base
                 self.samples[samp] = self.fromPath[base] = loadNumpy(path)
                 self.blurredSamples[samp] = self._blur(self.samples[samp])
-
-                for s in g.objects(samp, L9['setting']):
-                    d = g.value(s, L9['device'])
-                    da = g.value(s, L9['deviceAttr'])
-                    v = getVal(g, s)
-                    key = (samp, g.value(samp, L9['path']).toPython())
-                    self.sampleSettings.setdefault(key, {}).setdefault(d, {})[da] = v
+                
+                key = (samp, g.value(samp, L9['path']).toPython())
+                self.sampleSettings[key] = DeviceSettings.fromResource(self.graph, samp)
 
     def _blur(self, img):
         return scipy.ndimage.gaussian_filter(img, 10, 0, mode='nearest')
@@ -123,7 +95,7 @@
     def solve(self, painting):
         """
         given strokes of colors on a photo of the stage, figure out the
-        best light settings to match the image
+        best light DeviceSettings to match the image
         """
         pic0 = self.draw(painting, 100, 48).astype(numpy.float)
         pic0Blur = self._blur(pic0)
@@ -148,17 +120,10 @@
             return []
 
         scale = brightest0 / brightestSample
-        
-        out = []
-        with self.graph.currentState() as g:
-            for obj in g.objects(sample, L9['setting']):
-                attr = g.value(obj, L9['deviceAttr'])
-                val = getVal(g, obj)
-                if attr == L9['color']:
-                    val = scaledHex(val, scale)
-                out.append((g.value(obj, L9['device']), attr, val))
-                           
-        return out
+
+        s = DeviceSettings.fromResource(self.graph, sample)
+        # missing color scale, but it was wrong to operate on all devs at once
+        return s
 
     def solveBrute(self, painting):
         pic0 = self.draw(painting, 100, 48).astype(numpy.float)
@@ -188,9 +153,9 @@
 
         
         def drawError(x):
-            settings = settingsFromVector(x)
+            settings = DeviceSettings.fromVector(self.graph, x)
             preview = self.combineImages(self.simulationLayers(settings))
-            saveNumpy('/tmp/x_%s.png' % abs(hash(tuple(settings))), preview)
+            saveNumpy('/tmp/x_%s.png' % abs(hash(settings)), preview)
             
             diff = preview.astype(numpy.float) - pic0
             out = scipy.sum(abs(diff))
@@ -206,7 +171,7 @@
             full_output=True)
         if fval > 30000:
             raise ValueError('solution has error of %s' % fval)
-        return settingsFromVector(x0)
+        return DeviceSettings.fromVector(self.graph, x0)
         
     def combineImages(self, layers):
         """make a result image from our self.samples images"""
@@ -222,45 +187,21 @@
         how should a simulation preview approximate the light settings
         (device attribute values) by combining photos we have?
         """
-
-        compiled = {} # dev: { attr: val }
-        for row in settings:
-            compiled.setdefault(row[0], {})[row[1]] = row[2]
-
+        assert isinstance(settings, DeviceSettings)
         layers = []
 
-        for dev, davs in compiled.items():
+        for dev, devSettings in settings.byDevice():
+            requestedColor = devSettings.getValue(dev, L9['color'])
             candidatePics = [] # (distance, path, picColor)
-            
             for (sample, path), s in self.sampleSettings.items():
-                for picDev, picDavs in s.items():
-                    if picDev != dev:
-                        continue
-
-                    requestedAttrs = davs.copy()
-                    picAttrs = picDavs.copy()
-                    del requestedAttrs[L9['color']]
-                    del picAttrs[L9['color']]
-
-                    dist = attrDistance(picAttrs, requestedAttrs)
-                    candidatePics.append((dist, path, picDavs[L9['color']]))
+                dist = devSettings.distanceTo(s.ofDevice(dev))
+                candidatePics.append((dist, path, s.getValue(dev, L9['color'])))
             candidatePics.sort()
             # we could even blend multiple top candidates, or omit all
             # of them if they're too far
             bestDist, bestPath, bestPicColor = candidatePics[0]
 
-            requestedColor = davs[L9['color']]
             layers.append({'path': bestPath,
                            'color': colorRatio(requestedColor, bestPicColor)})
         
         return layers
-
-
-def attrDistance(attrs1, attrs2):
-    dist = 0
-    for key in set(attrs1).union(set(attrs2)):
-        if key not in attrs1 or key not in attrs2:
-            dist += 999
-        else:
-            dist += abs(attrs1[key] - attrs2[key])
-    return dist
--- a/light9/paint/solve_test.py	Fri May 19 07:35:29 2017 +0000
+++ b/light9/paint/solve_test.py	Fri May 19 07:42:27 2017 +0000
@@ -3,6 +3,7 @@
 import solve
 from light9.namespaces import RDF, L9, DEV
 from light9.rdfdb.localsyncedgraph import LocalSyncedGraph
+from light9.effect.settings import DeviceSettings
 
 class TestSolve(unittest.TestCase):
     def setUp(self):
@@ -13,17 +14,17 @@
 
     def testBlack(self):
         devAttrs = self.solveMethod({'strokes': []})
-        self.assertEqual([], devAttrs)
+        self.assertEqual(DeviceSettings(self.graph, []), devAttrs)
 
     def testSingleLightCloseMatch(self):
         devAttrs = self.solveMethod({'strokes': [{'pts': [[224, 141],
                                                  [223, 159]],
                                          'color': '#ffffff'}]})
-        self.assertItemsEqual([
+        self.assertEqual(DeviceSettings(self.graph, [
             (DEV['aura1'], L9['color'], u"#ffffff"),
             (DEV['aura1'], L9['rx'], 0.5 ),
             (DEV['aura1'], L9['ry'], 0.573),
-        ], devAttrs)
+        ]), devAttrs)
 
 class TestSolveBrute(TestSolve):
     def setUp(self):
@@ -32,36 +33,38 @@
         
 class TestSimulationLayers(unittest.TestCase):
     def setUp(self):
-        graph = LocalSyncedGraph(files=['show/dance2017/cam/test/bg.n3'])
-        self.solver = solve.Solver(graph)
+        self.graph = LocalSyncedGraph(files=['show/dance2017/cam/test/bg.n3'])
+        self.solver = solve.Solver(self.graph)
         self.solver.loadSamples()
         
     def testBlack(self):
-        self.assertEqual([], self.solver.simulationLayers(settings=[]))
+        self.assertEqual(
+            [],
+            self.solver.simulationLayers(settings=DeviceSettings(self.graph, [])))
 
     def testPerfect1Match(self):
-        layers = self.solver.simulationLayers(settings=[
+        layers = self.solver.simulationLayers(settings=DeviceSettings(self.graph, [
             (DEV['aura1'], L9['color'], u"#ffffff"),
             (DEV['aura1'], L9['rx'], 0.5 ),
-            (DEV['aura1'], L9['ry'], 0.573)])
+            (DEV['aura1'], L9['ry'], 0.573)]))
         self.assertEqual([{'path': 'bg2-d.jpg', 'color': (1., 1., 1.)}], layers)
 
     def testPerfect1MatchTinted(self):
-        layers = self.solver.simulationLayers(settings=[
+        layers = self.solver.simulationLayers(settings=DeviceSettings(self.graph, [
             (DEV['aura1'], L9['color'], u"#304050"),
             (DEV['aura1'], L9['rx'], 0.5 ),
-            (DEV['aura1'], L9['ry'], 0.573)])
+            (DEV['aura1'], L9['ry'], 0.573)]))
         self.assertEqual([{'path': 'bg2-d.jpg', 'color': (.188, .251, .314)}], layers)
         
     def testPerfect2Matches(self):
-        layers = self.solver.simulationLayers(settings=[
+        layers = self.solver.simulationLayers(settings=DeviceSettings(self.graph, [
             (DEV['aura1'], L9['color'], u"#ffffff"),
             (DEV['aura1'], L9['rx'], 0.5 ),
             (DEV['aura1'], L9['ry'], 0.573),
             (DEV['aura2'], L9['color'], u"#ffffff"),
             (DEV['aura2'], L9['rx'], 0.7 ),
             (DEV['aura2'], L9['ry'], 0.573),
-        ])
+        ]))
         self.assertItemsEqual([
             {'path': 'bg2-d.jpg', 'color': (1, 1, 1)},
             {'path': 'bg2-f.jpg', 'color': (1, 1, 1)},