comparison service/speechMusic/speechMusic.py @ 75:1132ab1ade80

big rewrite of speechMusic to use klein and openal
author drewp@bigasterisk.com
date Wed, 29 May 2013 00:33:02 -0700
parents 0ab069867c64
children 1e94d074f642

--- service/speechMusic/speechMusic.py @ 74:bca6d6c63bdc
+++ service/speechMusic/speechMusic.py @ 75:1132ab1ade80
 #!bin/python
-
 """
-play sounds according to POST requests. cooperate with pubsubhubbub
+play sounds according to POST requests.
 """
-import web, sys, json, subprocess, os, tempfile, logging
-from subprocess import check_call
+from __future__ import division
+import sys, tempfile, logging, pyjade
+from pyjade.ext.mako import preprocessor as mako_preprocessor
+from mako.template import Template
+from mako.lookup import TemplateLookup
+sys.path.append("python-openal")
+import openal
+from twisted.internet import reactor
 sys.path.append("/my/proj/csigen")
 from generator import tts
 import xml.etree.ElementTree as ET
-logging.basicConfig(level=logging.INFO, format="%(created)f %(asctime)s %(levelname)s %(message)s")
+from klein import Klein
+from twisted.web.static import File
+logging.basicConfig(level=logging.INFO,
+                    format="%(created)f %(asctime)s %(levelname)s %(message)s")
 log = logging.getLogger()
 
-sensorWords = {"wifi" : "why fi",
-               "bluetooth" : "bluetooth"}
+templates = TemplateLookup(directories=['.'],
+                           preprocessor=mako_preprocessor,
+                           filesystem_checks=True)
 
-def aplay(device, filename):
-    paDeviceName = {
-        'garage' : 'alsa_output.pci-0000_01_07.0.analog-stereo',
-        'living' : 'alsa_output.pci-0000_00_04.0.analog-stereo',
-        }[device]
-    subprocess.call(['paplay',
-                     '-d', paDeviceName,
-                     filename])
-
-def soundOut(preSound=None, speech='', postSound=None, fast=False):
-
+def makeSpeech(speech, fast=False):
     speechWav = tempfile.NamedTemporaryFile(suffix='.wav')
 
     root = ET.Element("SABLE")
     r = ET.SubElement(root, "RATE",
                       attrib=dict(SPEED="+50%" if fast else "+0%"))
     for sentence in speech.split('.'):
         div = ET.SubElement(r, "DIV")
         div.set("TYPE", "sentence")
         div.text = sentence
 
-    sounds = []
-    delays = []
+    speechSecs = tts(root, speechWav.name)
+    return openal.Buffer(speechWav.name), speechSecs
 
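(For reference, the SABLE document that makeSpeech() builds before handing it to tts() can be previewed with the same ElementTree calls. A sketch only; the phrase is invented and the actual audio rendering is up to the tts generator in /my/proj/csigen.)

import xml.etree.ElementTree as ET

root = ET.Element("SABLE")
r = ET.SubElement(root, "RATE", attrib=dict(SPEED="+50%"))  # the fast=True case
for sentence in "Hello there. Goodbye".split('.'):
    div = ET.SubElement(r, "DIV")
    div.set("TYPE", "sentence")
    div.text = sentence
print ET.tostring(root)
# <SABLE><RATE SPEED="+50%"><DIV TYPE="sentence">Hello there</DIV><DIV TYPE="sentence"> Goodbye</DIV></RATE></SABLE>
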
-    if preSound is not None:
-        sounds.append(preSound)
-        delays.extend([0,0]) # assume stereo
-
-    speechSecs = tts(root, speechWav.name)
-    sounds.append(speechWav.name)
-    delays.append(.4)
-    if postSound is not None:
-        sounds.append(postSound)
-        delays.extend([speechSecs + .4]*2) # assume stereo
-
-    if len(sounds) == 1:
-        outName = sounds[0]
-    else:
-        outWav = tempfile.NamedTemporaryFile(suffix='.wav')
-        check_call(['/usr/bin/sox', '--norm', '--combine', 'merge',
-                    ]+sounds+[
-                    outWav.name,
-                    'delay', ]+map(str, delays)+[
-                    'channels', '1'])
-        outName = outWav.name
+class SoundEffects(object):
+    def __init__(self):
+        # for names to pass to this, see alcGetString with ALC_ALL_DEVICES_SPECIFIER
+        device = openal.Device()
+        self.contextlistener = device.ContextListener()
 
-    aplay('living', outName)
+        # also '/my/music/entrance/%s.wav' then speak "Neew %s. %s" % (sensorWords[data['sensor']], data['name']),
 
-class visitorNet(object):
-    def POST(self):
-        data = json.loads(web.data())
-        if 'name' not in data:
-            data['name'] = 'unknown'
+        print "loading"
+        self.buffers = {
+            'leave': openal.Buffer('/my/music/entrance/leave.wav'),
+            'highlight' : openal.Buffer('/my/music/snd/Oxygen/KDE-Im-Highlight-Msg-44100.wav'),
+            'question' : openal.Buffer('/my/music/snd/angel_ogg/angel_question.wav'),
+            'jazztrumpet': openal.Buffer('/my/music/snd/sampleswap/MELODIC SAMPLES and LOOPS/Acid Jazz Trumpet Lines/acid-jazz-trumpet-11.wav'),
+            'beep1': openal.Buffer('/my/music/snd/bxfr/beep1.wav'),
+            'beep2': openal.Buffer('/my/music/snd/bxfr/beep2.wav'),
+            }
+        print "loaded sounds"
+        self.playingSources = []
+        self.queued = []
+
+    def playEffect(self, name):
+        return self.playBuffer(self.buffers[name])
+
+    def playSpeech(self, txt, preEffect=None, postEffect=None, preEffectOverlap=0):
+        buf, secs = makeSpeech(txt)
+        t = 0
+        if preEffect:
+            t += self.playEffect(preEffect)
+            t -= preEffectOverlap
 
-        if data.get('action') == 'arrive':
+        reactor.callLater(t, self.playBuffer, buf)
+        t += secs
+
+        if postEffect:
+            self.playBufferLater(t, self.buffers[postEffect])
+
+    def playBufferLater(self, t, buf):
+        self.queued.append(reactor.callLater(t, self.playBuffer, buf))
 
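(To make playSpeech's scheduling concrete, a hypothetical call, assuming a SoundEffects instance named sfx and the twisted reactor running: it plays the 'question' buffer immediately, starts the speech 0.3 s before that effect ends, and queues the 'leave' buffer for when the speech finishes.)

sfx.playSpeech("front door is open",
               preEffect='question', postEffect='leave', preEffectOverlap=.3)
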
-            snd = ('/my/music/entrance/%s.wav' %
-                   data['name'].replace(' ', '_').replace(':', '_'))
-            if not os.path.exists(snd):
-                snd = None
+    def playBuffer(self, buf):
+        src = self.contextlistener.get_source()
+        src.buffer = buf
+        src.play()
 
-            soundOut(preSound="/my/music/snd/angel_ogg/angel_question.wav",
-                     # sic:
-                     speech="Neew %s. %s" % (sensorWords[data['sensor']],
-                                             data['name']),
-                     postSound=snd, fast=True)
-            return 'ok'
+        secs = buf.size / (buf.frequency * buf.channels * buf.bits / 8)
+        self.playingSources.append(src)
+        reactor.callLater(secs + .1, self.done, src)
+        return secs
 
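(The secs estimate in playBuffer is the usual PCM arithmetic, buffer size in bytes divided by bytes per second. A worked example with made-up numbers, a mono 16-bit buffer at 44100 Hz holding two seconds of audio:)

from __future__ import division  # as at the top of the module

size, frequency, channels, bits = 176400, 44100, 1, 16
secs = size / (frequency * channels * bits / 8)
# 176400 / (44100 * 1 * 16/8) == 176400 / 88200.0 == 2.0 seconds
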
-        if data.get('action') == 'leave':
-            soundOut(preSound='/my/music/entrance/leave.wav',
-                     speech="lost %s. %s" % (sensorWords[data['sensor']],
-                                             data['name']),
-                     fast=True)
-            return 'ok'
+    def done(self, src):
+        try:
+            self.playingSources.remove(src)
+        except ValueError:
+            pass
+
+    def stopAll(self):
+        while self.playingSources:
+            self.playingSources.pop().stop()
+        for q in self.queued:
+            q.cancel()
+
+class Server(object):
+    app = Klein()
+    def __init__(self, sfx):
+        self.sfx = sfx
+
+    @app.route('/static/', branch=True)
+    def static(self, request):
+        return File("./static")
+
+    @app.route('/', methods=['GET'])
+    def index(self, request):
+        t = templates.get_template("index.jade")
+        return t.render(effectNames=[
+            dict(name=k, postUri='effects/%s' % k)
+            for k in self.sfx.buffers.keys()])
+
+    @app.route('/speak', methods=['POST'])
+    def speak(self, request):
+        self.sfx.playSpeech(request.args['msg'][0])
+        return "ok"
+
+    @app.route('/effects/<string:name>', methods=['POST'])
+    def effect(self, request, name):
+        self.sfx.playEffect(name)
+        return "ok"
+
+    @app.route('/stopAll', methods=['POST'])
+    def stopAll(self, request):
+        self.sfx.stopAll()
+        return "ok"
 
-        return "nothing to do"
+sfx = SoundEffects()
 
-class index(object):
-    def GET(self):
-        web.header('Content-type', 'text/html')
-        return '''
-<p><form action="speak" method="post">say: <input type="text" name="say"> <input type="submit"></form></p>
-<p><form action="testSound" method="post"> <input type="submit" value="test sound"></form></p>
-'''
-
-class speak(object):
-    def POST(self):
-        txt = web.input()['say']
-        log.info("speak: %r", txt)
-        soundOut(preSound='/my/music/snd/Oxygen/KDE-Im-Highlight-Msg-44100.wav',
-                 speech=txt)
-        return "sent"
-
-class testSound(object):
-    def POST(self):
-        soundOut(preSound='/my/music/entrance/leave.wav')
-        return 'ok'
-
-urls = (
-    r'/', 'index',
-    r'/speak', 'speak',
-    r'/testSound', 'testSound',
-    r'/visitorNet', 'visitorNet',
-    )
-
-app = web.application(urls, globals(), autoreload=True)
-
-if __name__ == '__main__':
-    sys.argv.append("9049")
-    app.run()
+server = Server(sfx)
+server.app.run("0.0.0.0", 9049)
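
The rewritten service listens on port 9049 (server.app.run above). A minimal client sketch, assuming it is running on localhost; it uses only the routes added in this changeset, and 'beep1' is one of the buffer names loaded in SoundEffects:

import urllib, urllib2

base = 'http://localhost:9049'

# speech: the /speak route reads the form-encoded 'msg' argument
# (request.args['msg'][0] takes the first value)
urllib2.urlopen(base + '/speak',
                urllib.urlencode({'msg': 'hello from speechMusic'}))

# fire a named effect, then stop all playing and queued sounds
urllib2.urlopen(base + '/effects/beep1', data='')
urllib2.urlopen(base + '/stopAll', data='')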