Location: light9/light9/paint/solve.py
Revision 9fe3052f8ced: theater values for capture

from __future__ import division
from light9.namespaces import RDF, L9, DEV
from PIL import Image
import numpy
import scipy.misc, scipy.ndimage, scipy.optimize
import cairo
import logging

from light9.effect.settings import DeviceSettings, parseHex, toHex

log = logging.getLogger('solve')

# numpy images in this file are (x, y, c) layout.
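# For example (illustrative, not from the original source): a 100x75 RGB
# image here has shape (100, 75, 3), and img[x, y] is an (r, g, b) triple
# in the 0-255 range.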
def numpyFromCairo(surface):
    w, h = surface.get_width(), surface.get_height()
    a = numpy.frombuffer(surface.get_data(), numpy.uint8)
    # view the flat pixel buffer as rows, then flip to (x, y, c) and keep
    # three channels
    a.shape = h, w, 4
    a = a.transpose((1, 0, 2))
    return a[:w, :h, :3]


def numpyFromPil(img):
    return scipy.misc.fromimage(img, mode='RGB').transpose((1, 0, 2))


def loadNumpy(path, thumb=(100, 100)):
    img = Image.open(path)
    img.thumbnail(thumb)
    return numpyFromPil(img)


def saveNumpy(path, img):
    # maybe this should only run if log level is debug?
    scipy.misc.imsave(path, img.transpose((1, 0, 2)))


def scaledHex(h, scale):
    rgb = parseHex(h)
    rgb8 = (rgb * scale).astype(numpy.uint8)
    return '#%02x%02x%02x' % tuple(rgb8)


def colorRatio(col1, col2):
    """per-channel ratio of col1 to col2, i.e. how much to scale col2 to get col1"""
    rgb1 = parseHex(col1)
    rgb2 = parseHex(col2)

    def div(x, y):
        if y == 0:
            return 0
        return round(x / y, 3)

    return tuple([div(a, b) for a, b in zip(rgb1, rgb2)])
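# Illustrative example (assuming parseHex('#rrggbb') returns 0-255 channel
# values, as scaledHex above implies):
#   colorRatio('#ff8000', '#808080') -> (1.992, 1.0, 0.0)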
def brightest(img):
    return numpy.amax(img, axis=(0, 1))


class ImageDist(object):
    """image distance based on a normalized dot product of the flattened images"""

    def __init__(self, img1):
        self.a = img1.reshape((-1,))
        self.d = 255 * 255 * self.a.shape[0]

    def distanceTo(self, img2):
        b = img2.reshape((-1,))
        return 1 - numpy.dot(self.a, b) / self.d
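# Hedged sketch of how the score behaves (not part of the original file):
#   white = numpy.full((4, 4, 3), 255.)
#   ImageDist(white).distanceTo(white)                  # -> 0.0
#   ImageDist(white).distanceTo(numpy.zeros((4, 4, 3))) # -> 1.0
# so darker or poorly matching candidates land between 0 and 1.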
class ImageDistAbs(object):
    """image distance based on the normalized sum of absolute pixel differences"""

    def __init__(self, img1):
        self.a = img1
        self.maxDist = img1.shape[0] * img1.shape[1] * img1.shape[2] * 255

    def distanceTo(self, img2):
        return numpy.sum(numpy.absolute(self.a - img2), axis=None) / self.maxDist


class Solver(object):
    def __init__(self, graph, sessions=None, imgSize=(100, 75)):
        self.graph = graph
        self.sessions = sessions  # URIs of capture sessions to load
        self.imgSize = imgSize
        self.samples = {}  # uri: Image array (float 0-255)
        self.fromPath = {}  # imagePath: image array
        self.blurredSamples = {}
        self.sampleSettings = {}  # (uri, path): DeviceSettings

    def loadSamples(self):
        """learn what lights do from images"""
        with self.graph.currentState() as g:
            for sess in self.sessions:
                for cap in g.objects(sess, L9['capture']):
                    self._loadSample(g, cap)
        log.info('loaded %s samples', len(self.samples))
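    # Hedged usage sketch (the graph and session URIs are supplied by the
    # caller; names here are hypothetical):
    #   solver = Solver(graph, sessions=[someCaptureSessionUri])
    #   solver.loadSamples()
    # afterwards self.samples maps each capture URI to its photo (float
    # array) and self.sampleSettings holds the DeviceSettings that produced
    # that photo.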
    def _loadSample(self, g, samp):
        pathUri = g.value(samp, L9['imagePath'])
        # convert the imagePath URI into a loadable path by stripping the
        # L9 namespace prefix
        self.samples[samp] = self.fromPath[pathUri] = loadNumpy(
            pathUri.replace(L9[''], '')).astype(float)
        self.blurredSamples[samp] = self._blur(self.samples[samp])

        key = (samp, pathUri)
        self.sampleSettings[key] = DeviceSettings.fromResource(self.graph, samp)

    def _blur(self, img):
        return scipy.ndimage.gaussian_filter(img, 10, 0, mode='nearest')

    def draw(self, painting):
        return self._draw(painting, self.imgSize[0], self.imgSize[1])

    def _draw(self, painting, w, h):
        surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
        ctx = cairo.Context(surface)
        ctx.rectangle(0, 0, w, h)
        ctx.fill()

        ctx.set_line_cap(cairo.LINE_CAP_ROUND)
        ctx.set_line_width(w / 5)  # ?
        for stroke in painting['strokes']:
            for pt in stroke['pts']:
                op = ctx.move_to if pt is stroke['pts'][0] else ctx.line_to
                op(pt[0] * w, pt[1] * h)
            r, g, b = parseHex(stroke['color'])
            ctx.set_source_rgb(r / 255, g / 255, b / 255)
            ctx.stroke()

        #surface.write_to_png('/tmp/surf.png')
        return numpyFromCairo(surface)

    def bestMatch(self, img):
        """the one sample that best matches this image"""
        #img = self._blur(img)
        results = []
        dist = ImageDist(img)
        for uri, img2 in sorted(self.samples.items()):
            if img.shape != img2.shape:
                continue
            results.append((dist.distanceTo(img2), uri, img2))
        results.sort()
        topDist, topUri, topImg = results[0]

        #saveNumpy('/tmp/best_in.png', img)
        #saveNumpy('/tmp/best_out.png', topImg)
        #saveNumpy('/tmp/mult.png', topImg / 255 * img)
        return topUri, topDist
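    # Illustrative use: uri, dist = solver.bestMatch(solver.draw(painting))
    # gives the capture sample whose photo has the smallest ImageDist score
    # against the drawn painting.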
    def solve(self, painting):
        """
        given strokes of colors on a photo of the stage, figure out the
        best light DeviceSettings to match the image
        """
        pic0 = self.draw(painting).astype(numpy.float)
        pic0Blur = self._blur(pic0)
        saveNumpy('/tmp/sample_paint_%s.png' % len(painting['strokes']),
                  pic0Blur)
        sampleDist = {}
        dist = ImageDist(pic0Blur)
        for sample, picSample in sorted(self.blurredSamples.items()):
            #saveNumpy('/tmp/sample_%s.png' % sample.split('/')[-1],
            #          f(picSample))
            sampleDist[sample] = dist.distanceTo(picSample)
        results = [(d, uri) for uri, d in sampleDist.items()]
        results.sort()

        sample = results[0][1]

        # this is wrong; some wrong-alignments ought to be dimmer than full
        brightest0 = brightest(pic0)
        brightestSample = brightest(self.samples[sample])

        if max(brightest0) < 1 / 255:
            return DeviceSettings(self.graph, [])

        scale = brightest0 / brightestSample

        s = DeviceSettings.fromResource(self.graph, sample)
        # missing color scale, but it was wrong to operate on all devs at once
        return s
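    # The painting argument, as _draw above consumes it, looks roughly like
    # this (values are only illustrative):
    #   {'strokes': [{'pts': [[0.2, 0.4], [0.8, 0.4]],  # x, y in 0..1
    #                 'color': '#ff8800'}]}
    # and solve() returns the DeviceSettings of the closest-matching capture
    # (or empty settings when the painting is essentially black).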
    def solveBrute(self, painting):
        """grid-search device attribute values (scipy.optimize.brute) to match the painting"""
        pic0 = self.draw(painting).astype(numpy.float)

        colorSteps = 2
        colorStep = 1. / colorSteps

        # use toVector then add ranges
        dims = [
            (DEV['aura1'], L9['rx'], [slice(.2, .7 + .1, .2)]),
            (DEV['aura1'], L9['ry'], [slice(.573, .573 + 1, 1)]),
            (DEV['aura1'], L9['color'], [slice(0, 1 + colorStep, colorStep),
                                         slice(0, 1 + colorStep, colorStep),
                                         slice(0, 1 + colorStep, colorStep)]),
        ]
        deviceAttrFilter = [(d, a) for d, a, s in dims]

        dist = ImageDist(pic0)

        def drawError(x):
            # render a preview of these candidate settings and measure how
            # far it is from the painted target image
            settings = DeviceSettings.fromVector(self.graph, x,
                                                 deviceAttrFilter=deviceAttrFilter)
            preview = self.combineImages(self.simulationLayers(settings))
            #saveNumpy('/tmp/x_%s.png' % abs(hash(settings)), preview)

            out = dist.distanceTo(preview)
            #print 'measure at', x, 'drawError=', out
            return out

        x0, fval, grid, Jout = scipy.optimize.brute(
            func=drawError,
            ranges=sum([s for dev, da, s in dims], []),
            finish=None,
            disp=True,
            full_output=True)
        if fval > 30000:
            raise ValueError('solution has error of %s' % fval)
        return DeviceSettings.fromVector(self.graph, x0,
                                         deviceAttrFilter=deviceAttrFilter)

    def combineImages(self, layers):
        """make a result image from our self.samples images"""
        out = (self.fromPath.itervalues().next() * 0).astype(numpy.uint16)
        for layer in layers:
            colorScaled = self.fromPath[layer['path']] * layer['color']
            out += colorScaled.astype(numpy.uint16)
        numpy.clip(out, 0, 255, out)
        return out.astype(numpy.uint8)
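    # Illustrative input, matching what simulationLayers below returns:
    #   [{'path': pathUri, 'color': (1.0, 0.5, 0.0)}]
    # each sample photo is scaled per channel by 'color', then the layers
    # are summed and clipped to the 0-255 range.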
    def simulationLayers(self, settings):
        """
        how should a simulation preview approximate the light settings
        (device attribute values) by combining photos we have?
        """
        assert isinstance(settings, DeviceSettings)
        layers = []

        for dev, devSettings in settings.byDevice():
            requestedColor = devSettings.getValue(dev, L9['color'])
            candidatePics = []  # (distance, path, picColor)
            for (sample, path), s in self.sampleSettings.items():
                otherDevSettings = s.ofDevice(dev)
                if not otherDevSettings:
                    continue
                dist = devSettings.distanceTo(otherDevSettings)
                log.info(' candidate pic %s %s dist=%s', sample, path, dist)
                candidatePics.append((dist, path, s.getValue(dev, L9['color'])))
            candidatePics.sort()
            # we could even blend multiple top candidates, or omit all
            # of them if they're too far
            bestDist, bestPath, bestPicColor = candidatePics[0]
            log.info(' device best d=%g path=%s color=%s', bestDist, bestPath, bestPicColor)

            layers.append({'path': bestPath,
                           'color': colorRatio(requestedColor, bestPicColor)})

        return layers
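
# Hedged sketch of the overall flow (graph, captureSessionUri, and painting
# are assumptions supplied by the caller, not defined in this module):
#   solver = Solver(graph, sessions=[captureSessionUri])
#   solver.loadSamples()
#   settings = solver.solve(painting)            # nearest-sample match
#   layers = solver.simulationLayers(settings)   # photos plus color ratios
#   preview = solver.combineImages(layers)       # simulated stage image
#   saveNumpy('/tmp/preview.png', preview)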
|