Changeset - 0dc3715050cf
default
0 files removed, 3 files changed, 1 file added
drewp@bigasterisk.com - 2019-06-04 16:24:12
drewp@bigasterisk.com
video recorder now makes mp4 per song
Ignore-this: 9d60ecca4c1ab1ab99340addd2d2d264
4 files changed with 237 insertions and 20 deletions:
bin/vidref
 
@@ -10,7 +10,8 @@ light9/vidref/videorecorder.py capture f
light9/vidref/replay.py backend for vidref.js playback element- figures out which frames go with the current song and time
light9/vidref/index.html web ui for watching current stage and song playback
light9/vidref/setup.html web ui for setup of camera params and frame crop
light9/web/vidref.js LitElement for video playback
light9/web/light9-vidref-live.js LitElement for live video frames
light9/web/light9-vidref-playback.js LitElement for video playback

"""
from run_local import log
@@ -55,7 +56,9 @@ class Snapshot(cyclone.web.RequestHandle


pipeline = videorecorder.GstSource(
    '/dev/v4l/by-id/usb-Generic_FULL_HD_1080P_Webcam_200901010001-video-index0')
    '/dev/v4l/by-id/usb-Bison_HD_Webcam_200901010001-video-index0'
#    '/dev/v4l/by-id/usb-Generic_FULL_HD_1080P_Webcam_200901010001-video-index0'
)


class Live(cyclone.websocket.WebSocketHandler):
@@ -66,18 +69,17 @@ class Live(cyclone.websocket.WebSocketHa
    def connectionLost(self, reason):
        0  #self.subj.dispose()

    def onFrame(self, t_img):
        t, img = t_img
        if img is None: return
    def onFrame(self, cf: videorecorder.CaptureFrame):
        if cf is None: return
        output = BytesIO()
        img.save(output, 'jpeg', quality=80)
        cf.img.save(output, 'jpeg', quality=80)

        self.sendMessage(
            json.dumps({
                'jpeg':
                base64.b64encode(output.getvalue()).decode('ascii'),
                'description':
                f't={t}',
                f't={cf.t}',
            }))


@@ -95,7 +97,9 @@ class Time(cyclone.web.RequestHandler):
        self.set_status(202)


graph = SyncedGraph(networking.rdfdb.url, "vidref")
#graph = SyncedGraph(networking.rdfdb.url, "vidref")
outVideos = videorecorder.FramesToVideoFiles(pipeline.liveImages)
#outVideos.save('/tmp/mov1')

port = networking.vidref.port
reactor.listenTCP(
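
The reworked Live handler above now receives whole videorecorder.CaptureFrame objects from pipeline.liveImages instead of (t, img) tuples. As a rough illustration only (the encode_frame helper and FakeFrame stand-in below are invented here, not part of this changeset), this is the shape of each websocket message: a base64 JPEG plus a "t=" description taken from the frame's song time.

import base64
import json
from io import BytesIO

from PIL import Image


def encode_frame(cf) -> str:
    """Build the JSON message Live.onFrame sends for one frame.

    cf only needs .img (a PIL Image) and .t (song time in seconds),
    matching videorecorder.CaptureFrame.
    """
    output = BytesIO()
    cf.img.save(output, 'jpeg', quality=80)  # same quality the handler uses
    return json.dumps({
        'jpeg': base64.b64encode(output.getvalue()).decode('ascii'),
        'description': f't={cf.t}',
    })


if __name__ == '__main__':
    class FakeFrame:  # stand-in; real frames come from GstSource.liveImages
        img = Image.new('RGB', (640, 380), 'black')
        t = 12.5

    print(encode_frame(FakeFrame())[:120])  # peek at what the web UI receives
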
light9/vidref/moviestore.py
 
new file 100644
import os
from bisect import bisect_left
from rdflib import URIRef
import sys
sys.path.append(
    '/home/drewp/Downloads/moviepy/lib/python2.7/site-packages')  # for moviepy
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader


class _ResourceDir(object):
    """the disk files for a resource"""

    def __init__(self, root, uri):
        self.root, self.uri = root, uri
        u = self.uri.replace('http://', '').replace('/', '_')
        self.topDir = os.path.join(self.root, u)
        try:
            os.makedirs(self.topDir)
        except OSError:
            pass

    def videoPath(self):
        return os.path.join(self.topDir, 'video.avi')

    def indexPath(self):
        return os.path.join(self.topDir, 'frame_times')


class Writer(object):
    """saves a video of a resource, receiving a frame at a time. Frame timing does not have to be regular."""

    def __init__(self, root, uri):
        self.rd = _ResourceDir(root, uri)
        self.ffmpegWriter = None  # lazy since we don't know the size yet
        self.index = open(self.rd.indexPath(), 'w')
        self.framesWritten = 0

    def save(self, t, img):
        if self.ffmpegWriter is None:
            self.ffmpegWriter = FFMPEG_VideoWriter(
                filename=self.rd.videoPath(),
                size=img.size,
                fps=10,  # doesn't matter, just for debugging playbacks
                codec='libx264')
        self.ffmpegWriter.write_frame(img)
        self.framesWritten = self.framesWritten + 1
        self.index.write('%d %g\n' % (self.framesWritten, t))

    def close(self):
        if self.ffmpegWriter is not None:
            self.ffmpegWriter.close()
        self.index.close()


class Reader(object):

    def __init__(self, resourceDir):
        self.timeFrame = []
        for line in open(resourceDir.indexPath()):
            f, t = line.strip().split()
            self.timeFrame.append((float(t), int(f)))
        self._reader = FFMPEG_VideoReader(resourceDir.videoPath())

    def getFrame(self, t):
        i = bisect_left(self.timeFrame, (t, None))
        i = min(i, len(self.timeFrame) - 1)
        f = self.timeFrame[i][1]
        return self._reader.get_frame(f)


class MultiReader(object):
    """loads the nearest existing frame of a resource's video. Supports random access of multiple resources."""

    def __init__(self, root):
        self.root = root
        # these should cleanup when they haven't been used in a while
        self.readers = {}  # uri: Reader

    def getFrame(self, uri, t):
        if uri not in self.readers:
            #self.readers.close all and pop them
            self.readers[uri] = Reader(_ResourceDir(self.root, uri))
        return self.readers[uri].getFrame(t)


if __name__ == '__main__':
    from PIL import Image
    take = URIRef(
        'http://light9.bigasterisk.com/show/dance2015/song10/1434249076/')
    if 0:
        w = Writer('/tmp/ms', take)
        for fn in sorted(
                os.listdir(
                    '/home/drewp/light9-vidref/play-light9.bigasterisk.com_show_dance2015_song10/1434249076'
                )):
            t = float(fn.replace('.jpg', ''))
            jpg = Image.open(
                '/home/drewp/light9-vidref/play-light9.bigasterisk.com_show_dance2015_song10/1434249076/%08.03f.jpg'
                % t)
            jpg = jpg.resize((450, 176))
            w.save(t, jpg)
        w.close()
    else:
        r = MultiReader('/tmp/ms')
        print((r.getFrame(take, 5.6)))
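
Writer and Reader above share a small on-disk index: frame_times holds one "<frameNumber> <songTime>" line per saved frame, and Reader.getFrame bisects that list to return the first frame at or after the requested time, clamped to the last frame. Here is a standalone sketch of just that lookup, with made-up index data (the real classes wrap FFMPEG_VideoWriter and FFMPEG_VideoReader).

from bisect import bisect_left


def load_index(path):
    """Parse a frame_times file: '<frameNumber> <songTimeSeconds>' per line."""
    timeFrame = []
    with open(path) as f:
        for line in f:
            frameNum, t = line.strip().split()
            timeFrame.append((float(t), int(frameNum)))
    return timeFrame  # already time-ordered, since frames are written in order


def frame_for_time(timeFrame, t):
    """Return the first frame at or after t, clamped to the last frame.

    The -1 tiebreaker avoids comparing an int with None, which the
    (t, None) probe in Reader.getFrame above would hit on an exact
    time match under Python 3.
    """
    i = bisect_left(timeFrame, (t, -1))
    i = min(i, len(timeFrame) - 1)
    return timeFrame[i][1]


if __name__ == '__main__':
    timeFrame = [(0.0, 1), (0.45, 2), (1.2, 3)]  # made-up index contents
    print(frame_for_time(timeFrame, 0.5))   # -> 3
    print(frame_for_time(timeFrame, 99.0))  # -> 3 (clamped past the end)
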
light9/vidref/videorecorder.py
 
import sys
import time, logging, os, traceback, sys

import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from gi.repository import Gst
from rx.subjects import BehaviorSubject

import time, logging, os, traceback
from dataclasses import dataclass
from PIL import Image
from twisted.internet import defer
from twisted.internet import defer, threads
from queue import Queue
from light9.vidref.replay import framerate, songDir, takeDir, snapshotDir
from typing import Set

from typing import Set, Optional
import moviepy.editor
import numpy
from light9.ascoltami.musictime_client import MusicTime
from light9.newtypes import Song
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                     color_scheme='Linux',
@@ -19,6 +22,98 @@ sys.excepthook = ultratb.FormattedTB(mod

log = logging.getLogger()

@dataclass(frozen=True)
class CaptureFrame:
    img: Image
    song: Song
    t: float
    isPlaying: bool

class FramesToVideoFiles:
    """

    nextWriteAction: 'ignore'
    currentOutputClip: None

    (frames come in for new video)
    nextWriteAction: 'saveFrame'
    currentOutputClip: new VideoClip
    (many frames)

    (music stops or song changes)
    nextWriteAction: 'close'
    currentOutputClip: None
    nextWriteAction: 'ignore'

    """
    def __init__(self, frames: BehaviorSubject):
        self.frames = frames
        self.nextImg = None

        self.currentOutputClip = None
        self.currentOutputSong = None
        self.nextWriteAction = 'ignore'
        self.frames.subscribe(on_next=self.onFrame)

    def onFrame(self, cf: Optional[CaptureFrame]):
        if cf is None:
            return
        self.nextImg = cf

        if self.currentOutputClip is None and cf.isPlaying:
            # start up
            self.nextWriteAction = 'saveFrames'
            self.currentOutputSong = cf.song
            self.save('/tmp/out%s' % time.time())
        elif self.currentOutputClip and cf.isPlaying:
            self.nextWriteAction = 'saveFrames'
            # continue recording this
        elif self.currentOutputClip is None and not cf.isPlaying:
            self.nextWriteAction = 'notWritingClip'
            pass # continue waiting
        elif self.currentOutputClip and not cf.isPlaying or self.currentOutputSong != cf.song:
            # stop
            self.nextWriteAction = 'close'
        else:
            raise NotImplementedError

    def save(self, outBase):
        """
        receive frames (infinite) and wall-to-song times (stream ends with
        the song), and write a video file and a frame map
        """
        return threads.deferToThread(self._bg_save, outBase)

    def _bg_save(self, outBase):
        self.frameMap = open(outBase + '.timing', 'wt')

        # (immediately calls make_frame)
        self.currentOutputClip = moviepy.editor.VideoClip(
            self._bg_make_frame, duration=999.)
        self.currentOutputClip.fps = 5
        log.info(f'write_videofile {outBase} start')
        try:
            self.currentOutputClip.write_videofile(
                outBase + '.mp4',
                audio=False, preset='ultrafast', verbose=True, bitrate='150000')
        except (StopIteration, RuntimeError):
            pass
        log.info('write_videofile done')
        self.currentOutputClip = None

    def _bg_make_frame(self, video_time_secs):
        if self.nextWriteAction == 'close':
            raise StopIteration # the one in write_videofile

        # should be a queue to miss fewer frames
        while self.nextImg is None:
            time.sleep(.03)
        cf, self.nextImg = self.nextImg, None

        self.frameMap.write(
            f'video {video_time_secs:g} = song {cf.t:g}\n')
        self.frameMap.flush()
        return numpy.asarray(cf.img)

class GstSource:

@@ -27,15 +122,21 @@ class GstSource:
        make new gst pipeline
        """
        Gst.init(None)
        self.liveImages = BehaviorSubject((0, None))
        self.musicTime = MusicTime(pollCurvecalc=False)
        self.liveImages: BehaviorSubject[Optional[CaptureFrame]] = BehaviorSubject(None)

        size = [800, 600]
        size = [640, 480]

        log.info("new pipeline using device=%s" % dev)

        # using videocrop breaks the pipeline, may be this issue
        # https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/issues/732
        pipeStr = f"v4l2src device=\"{dev}\" ! videoconvert ! appsink emit-signals=true max-buffers=1 drop=true name=end0 caps=video/x-raw,format=RGB,width={size[0]},height={size[1]}"
        pipeStr = (
            #f"v4l2src device=\"{dev}\""
            f'autovideosrc'
            f" ! videoconvert"
            f" ! appsink emit-signals=true max-buffers=1 drop=true name=end0 caps=video/x-raw,format=RGB,width={size[0]},height={size[1]}"
            )
        log.info("pipeline: %s" % pipeStr)

        self.pipe = Gst.parse_launch(pipeStr)
@@ -46,7 +147,7 @@ class GstSource:
        self.appsink.connect('new-sample', self.new_sample)

        self.pipe.set_state(Gst.State.PLAYING)
        log.info('recording video')
        log.info('gst pipeline is recording video')

    def new_sample(self, appsink):
        try:
@@ -59,10 +160,15 @@ class GstSource:
                    'RGB', (caps.get_structure(0).get_value('width'),
                            caps.get_structure(0).get_value('height')),
                    mapinfo.data)
                img = img.crop((0, 100, 800, 500))
                img = img.crop((0, 100, 640, 380))
            finally:
                buf.unmap(mapinfo)
            self.liveImages.on_next((time.time(), img))
            # could get gst's frame time and pass it to getLatest
            latest = self.musicTime.getLatest()
            if 'song' in latest:
                self.liveImages.on_next(
                    CaptureFrame(img, Song(latest['song']),
                                 latest['t'], latest['playing']))
        except Exception:
            traceback.print_exc()
        return Gst.FlowReturn.OK
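
FramesToVideoFiles above drives moviepy in a pull style: write_videofile repeatedly calls the make_frame callback, and the callback blocks until the camera has pushed a fresh frame into the one-slot buffer (self.nextImg). Below is a minimal standalone sketch of that pattern (not part of the changeset; the producer thread, the latest slot, and the output path are invented). It stops after a fixed duration instead of raising from make_frame the way the changeset does, which is why the real _bg_save wraps write_videofile in except (StopIteration, RuntimeError).

import threading
import time

import moviepy.editor
import numpy

latest = {'frame': None}  # one-frame slot, like FramesToVideoFiles.nextImg


def producer(n_frames=30):
    """Pretend camera: drop a new 64x64 RGB frame into the slot every 100 ms."""
    for i in range(n_frames):
        latest['frame'] = numpy.full((64, 64, 3), (i * 8) % 256, dtype=numpy.uint8)
        time.sleep(0.1)


def make_frame(video_time_secs):
    """Called by moviepy for each output frame; waits for the next live frame."""
    while latest['frame'] is None:
        time.sleep(0.03)
    frame, latest['frame'] = latest['frame'], None
    return frame


if __name__ == '__main__':
    # start the producer first: VideoClip calls make_frame(0) right away to
    # learn the frame size, matching the "(immediately calls make_frame)"
    # note in _bg_save above
    threading.Thread(target=producer, daemon=True).start()
    clip = moviepy.editor.VideoClip(make_frame, duration=2.0)
    clip.fps = 5
    clip.write_videofile('/tmp/pull_pattern_demo.mp4', audio=False,
                         preset='ultrafast', bitrate='150000')
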
requirements.txt
 
@@ -7,6 +7,7 @@ freezegun==0.3.8
genshi==0.7
klein==17.2.0
mock==2.0.0
moviepy==1.0.0
noise==1.2.2
nose-watcher==0.1.3
nose==1.3.7