Resolved conflict with Giulia by choosing her version

master
Nadine Rotem-Stibbe 8 years ago
commit 20dd05d653

17
.gitignore vendored

@ -0,0 +1,17 @@
/*
!.gitignore
!/etc
/etc/*
!/etc/hostname
!/etc/nginx/sites-available/
!/floppies/
/floppies/*/
!/usr/
/usr/*
!/usr/local/
/usr/local/*
!/usr/local/sbin/
/usr/local/sbin/*/

@ -0,0 +1 @@
TGC3

@ -0,0 +1,42 @@
server {
listen 80 default_server;
listen [::]:80 default_server;
#root /var/www/html;
root /media/floppy/noweb;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name _;
error_page 404 /insert.html;
location = /insert.html {
root /var/www/static;
internal;
}
location / {
try_files $uri $uri/ =404;
}
location /static {
alias /var/www/static;
autoindex on;
}
# /cgi-bin/foo.cgi ==> /media/floppy/noweb/cgi-bin/foo.cgi
location ~ ^/cgi-bin/.*\.cgi$ {
root /media/floppy/noweb/cgi-bin;
rewrite ^/cgi-bin/(.*)\.cgi /$1.cgi break;
include /etc/nginx/fastcgi_params;
fastcgi_pass unix:/var/run/fcgiwrap.socket;
fastcgi_param SCRIPT_FILENAME /media/floppy/noweb/cgi-bin$fastcgi_script_name;
}
}

Binary file not shown.

@ -0,0 +1 @@
FLOPPYLEFT - 2017

@ -0,0 +1,7 @@
Author: Slavoj Žižek
Date: 1989
Title: The Sublime Object of Floppy
Description:
And so on, and so on, and so on.

@ -0,0 +1,217 @@
#!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
    """Lightweight attribute container: Bunch(a=1).a == 1."""
    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)
    def __str__(self):
        # Render the stored attributes as a plain dict.
        return str(self.__dict__)
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension)."""
    directory, base = os.path.split(fn)
    stem, ext = os.path.splitext(base)
    return directory, stem, ext
def anorm2(a):
    """Squared Euclidean norm taken along the last axis of `a`."""
    return np.sum(a * a, axis=-1)
def anorm(a):
    """Euclidean norm along the last axis (square root of anorm2)."""
    return anorm2(a) ** 0.5
def homotrans(H, x, y):
    """Apply the 3x3 homography `H` to point(s) (x, y).

    Returns the perspective-divided coordinates (x', y').
    """
    num_x = H[0, 0] * x + H[0, 1] * y + H[0, 2]
    num_y = H[1, 0] * x + H[1, 1] * y + H[1, 2]
    den = H[2, 0] * x + H[2, 1] * y + H[2, 2]
    return num_x / den, num_y / den
def to_rect(a):
    """Normalize `a` to a 2x2 float64 rect [[x0, y0], [x1, y1]].

    A two-element input is treated as a (width, height) pair anchored
    at the origin.
    """
    flat = np.ravel(a)
    if len(flat) == 2:
        flat = (0, 0, flat[0], flat[1])
    return np.array(flat, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
    """3x3 scale-and-translate matrix mapping rect `src` onto rect `dst`.

    Both arguments accept anything `to_rect` accepts.
    """
    src, dst = to_rect(src), to_rect(dst)
    scale = (dst[1] - dst[0]) / (src[1] - src[0])
    cx, cy = scale
    tx, ty = dst[0] - src[0] * scale
    return np.float64([[cx, 0, tx],
                       [0, cy, ty],
                       [0,  0,  1]])
def lookat(eye, target, up = (0, 0, 1)):
    """Build camera extrinsics looking from `eye` toward `target`.

    Returns (R, tvec): a 3x3 rotation whose rows are the camera's
    right, down and forward unit vectors, and tvec = -R . eye.
    `up` fixes the roll (default: world +Z up).
    """
    fwd = np.asarray(target, np.float64) - eye
    fwd /= anorm(fwd)           # unit forward vector
    right = np.cross(fwd, up)
    right /= anorm(right)       # unit right vector, orthogonal to fwd and up
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a rotation matrix to an axis*angle (Rodrigues) vector
    via the SVD of R - I."""
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]  # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)     # cosine component of the rotation angle
    s = np.dot(vt[1], p)     # sine component of the rotation angle
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
    """Draw string `s` on image `dst` at pixel position `target` = (x, y).

    Renders a black copy offset by (1, 1) under the white text so it
    stays readable on any background.

    FIX: the original used Python-2-only tuple-parameter syntax
    `def draw_str(dst, (x, y), s)`, a SyntaxError on Python 3.
    Unpacking inside the body keeps the call site unchanged.
    """
    x, y = target
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
    """Interactive freehand drawing helper for an OpenCV window.

    Paints with the left mouse button onto one or more destination
    images in parallel; `colors_func()` supplies one color per image.
    `dirty` becomes True once anything has been drawn.
    """
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None              # last mouse position while dragging
        self.windowname = windowname
        self.dests = dests               # images painted on in parallel
        self.colors_func = colors_func   # callable returning one color per dest
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
    def show(self):
        # Refresh the window with the first destination image.
        cv2.imshow(self.windowname, self.dests[0])
    def on_mouse(self, event, x, y, flags, param):
        # While the left button is held, draw a 5px-wide segment from the
        # previous point to the current one on every destination image;
        # reset the anchor point on release.
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            self.prev_pt = None
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
    """Build an n-entry color lookup table (uint8 array of shape (n, 3))
    from the named palette in the module-level `cmap_data`
    (matplotlib-style segment data)."""
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    channels = []
    eps = 1e-6
    # Channels are assembled blue, green, red — OpenCV's BGR order.
    for ch_name in ['blue', 'green', 'red']:
        ch_data = data[ch_name]
        xp, yp = [], []
        for x, y1, y2 in ch_data:
            # Each segment point carries left/right values (y1, y2);
            # duplicating x at x+eps lets np.interp represent the jump
            # at a discontinuity.
            xp += [x, x+eps]
            yp += [y1, y2]
        ch = np.interp(xs, xp, yp)
        channels.append(ch)
    return np.uint8(np.array(channels).T*255)
def nothing(*arg, **kw):
    """No-op placeholder callback: accepts any arguments, does nothing."""
    return None
def clock():
    """Current time in seconds derived from OpenCV's tick counter."""
    return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
    """Context manager: prints `msg`, runs the body, then prints the
    elapsed wall time in milliseconds (Python 2 print syntax)."""
    print msg, '...',
    start = clock()
    try:
        yield
    finally:
        # Printed even if the body raises.
        print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
    """Exponentially smoothed scalar.

    The first update() stores the sample directly; every later update()
    blends it in: value = c*value + (1-c)*v, with c = smooth_coef.
    """
    def __init__(self, smooth_coef = 0.5):
        self.value = None              # None until the first sample arrives
        self.smooth_coef = smooth_coef
    def update(self, v):
        if self.value is None:
            self.value = v
            return
        c = self.smooth_coef
        self.value = c * self.value + (1.0 - c) * v
class RectSelector:
    """Mouse-driven rectangle selection for an OpenCV window.

    While the left button is dragged, tracks the axis-aligned rectangle
    spanned from the press point; on release, calls `callback(rect)`
    with rect = (x0, y0, x1, y1).
    """
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None  # press position, or None when idle
        self.drag_rect = None   # current (x0, y0, x1, y1), or None
    def onmouse(self, event, x, y, flags, param):
        # BUG (original author's note): int16 wraps for coordinates
        # beyond +/-32767.
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                # Normalize corners so (x0, y0) is top-left regardless of
                # drag direction; degenerate (zero-area) rects are dropped.
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                # Button released: report the final rectangle, if any.
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)
    def draw(self, vis):
        """Draw the in-progress rectangle on `vis`; True if one was drawn."""
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True
    @property
    def dragging(self):
        # True while a selection rectangle is being dragged.
        return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx

    Collect data into fixed-length tuples of size `n`, padding the last
    one with `fillvalue`.

    FIX: `it.izip_longest` only exists on Python 2 (it was renamed to
    `zip_longest` in Python 3); resolve whichever name is present so the
    helper runs on both.
    '''
    zip_longest = getattr(it, 'izip_longest', None) or it.zip_longest
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
    '''Make a grid from images.

    w    -- number of grid columns
    imgs -- iterable of images (must all have the same size and format)

    A short final row is padded with zero (black) images.

    FIX: `imgs.next()` is Python-2-only; the builtin `next()` works on
    Python 2.6+ and Python 3.  The map() result is materialized so
    np.vstack always receives a concrete sequence.
    '''
    imgs = iter(imgs)
    img0 = next(imgs)
    pad = np.zeros_like(img0)   # black filler tile for the last row
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(list(map(np.hstack, rows)))
def getsize(img):
    """Return (width, height) of an image stored as an (h, w, ...) array."""
    return img.shape[1], img.shape[0]
def mdot(*args):
    """Chained matrix product: mdot(a, b, c) == np.dot(np.dot(a, b), c).

    FIX: `reduce` is not a builtin on Python 3; `functools.reduce`
    exists on Python 2.6+ as well, so import it locally for
    compatibility with both.
    """
    from functools import reduce
    return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    """Mark each keypoint on image `vis` with a small radius-2 circle
    at its (rounded-down) pixel position."""
    for kp in keypoints:
        x, y = kp.pt
        cv2.circle(vis, (int(x), int(y)), 2, color)

Binary file not shown.

@ -0,0 +1,156 @@
#!/usr/bin/env python
import numpy as np
import cv2, math
import video
help_message = '''
USAGE: opt_flow.py [<video_source>]
Keys:
1 - toggle HSV flow visualization
2 - toggle glitch
'''
# def draw_flow(img, flow, step=4): # size grid
# h, w = img.shape[:2]
# y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
# fx, fy = flow[y,x].T
# lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
# lines = np.int32(lines + 0.5)
# vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# cv2.polylines(vis, lines, 0, (0, 0, 255)) # BGR
# for (x1, y1), (x2, y2) in lines:
# cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
# return vis
import OSC
# from pythonosc import osc_message_builder
# from pythonosc import udp_client
import time
def send_flow0(img, flow, step=4): # size grid
    """Sample dense optical flow on a `step`-spaced grid, send an OSC
    "/dot" message for each sampled sub-grid point moving by more than
    2 pixels, and return a BGR visualization of the selected vectors.

    img  -- grayscale frame (backdrop and size reference)
    flow -- dense flow field from cv2.calcOpticalFlowFarneback
    step -- sampling grid spacing in pixels

    Relies on the module-level OSC `client` created in __main__.

    Review fixes: removed an unreachable duplicate of the final
    drawing/return block (dead code after the first `return vis`) and
    dropped two long commented-out variant senders ("/x" and "/left");
    they remain available in version control.
    """
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    flines = []
    for (x1, y1), (x2, y2) in lines:
        # Only a fixed sub-grid of sample points is reported.
        if (x1 == 38 or x1 == 46 or x1 == 54 or x1 == 62 or x1 == 70 or x1 == 78 or x1 == 86 or x1 == 94 or x1 == 102 or x1 == 110 or x1 == 118) and y1 in range(38, 90, 8):
            flines.append(((x1,y1),(x2,y2)))
            # Grid coordinates normalized for the receiving pd patch.
            normx = x1 / 8 - 4
            normy = 1 - ((y1 / 8 - 4) / 3.0)
            dx = x2-x1
            dy = y2 - y1
            m = int(math.sqrt( (dx*dx) + (dy*dy) ))
            if m>2:
                print ("dot", (normx, normy))
                msg = OSC.OSCMessage()
                msg.setAddress("/dot")
                msg.append(normx)
                msg.append(normy)
                client.send(msg)
    flines = np.int32(flines)
    cv2.polylines(vis, flines, 0, (0, 40, 255)) # BGR
    for (x1, y1), (x2, y2) in flines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
# cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])
if __name__ == '__main__':
    import sys
    print help_message
    # Video source: first CLI argument, else default camera 0.
    try: fn = sys.argv[1]
    except: fn = 0
    # connect to pd
    # Init OSC
    client = OSC.OSCClient()
    client.connect(('127.0.0.1', 9001)) # first argument is the IP of the host, second argument is the port to use
    #data="hello"
    # client = udp_client.SimpleUDPClient("127.0.0.1", 9001)
    # connect camera
    # cam = video.create_capture(fn)
    cam = video.create_capture("0:size=160x120") #canvas size in pixels
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    cur_glitch = prev.copy()
    # Main loop: dense Farneback flow between consecutive frames,
    # displayed via send_flow0; ESC (key code 27) exits.
    while True:
        # print "GRAB FRAME"
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray
        cv2.imshow('flow', send_flow0(gray, flow))
        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
    cv2.destroyAllWindows()

@ -0,0 +1,65 @@
#N canvas 145 161 897 545 10;
#X floatatom 166 153 5 0 0 0 - - -, f 5;
#X floatatom 282 159 5 0 0 0 - - -, f 5;
#X obj 190 121 unpack f f;
#N canvas 0 50 450 278 (subpatch) 0;
#X array waveform 11 float 0;
#X coords 0 1 10 -1 200 140 1 0 0;
#X restore 494 188 graph;
#X obj 224 247 tabwrite waveform;
#X obj 264 189 t b f;
#X obj 225 220 f;
#X obj 198 354 tabread4~ waveform;
#X obj 232 401 dac~;
#X obj 488 116 tabread waveform;
#X obj 543 23 + 1;
#X obj 505 21 i;
#X obj 504 -39 tgl 15 0 empty empty empty 17 7 0 10 -262144 -1 -1 0
1;
#X msg 534 78 0;
#X floatatom 483 72 5 0 0 0 - - -, f 5;
#X floatatom 424 254 5 0 0 0 - - -, f 5;
#X obj 535 54 select 10;
#X obj 204 325 *~ 10;
#X obj 416 207 + 1;
#X obj 506 -9 metro 100;
#X obj 196 295 osc~;
#X obj 364 370 line~;
#X msg 364 342 \$1 10;
#X obj 337 295 expr 100 + (100 * $f1);
#X obj 267 -45 udpreceive 9001;
#X obj 267 -6 unpackOSC;
#X obj 267 33 routeOSC /dot;
#X obj 111 -112 loadbang;
#X msg 54 -54 \; pd dsp 1;
#X connect 2 0 0 0;
#X connect 2 0 5 0;
#X connect 2 1 1 0;
#X connect 2 1 6 1;
#X connect 5 0 6 0;
#X connect 5 1 4 1;
#X connect 6 0 4 0;
#X connect 7 0 8 0;
#X connect 7 0 8 1;
#X connect 9 0 18 0;
#X connect 10 0 11 1;
#X connect 11 0 10 0;
#X connect 11 0 14 0;
#X connect 11 0 9 0;
#X connect 11 0 16 0;
#X connect 12 0 19 0;
#X connect 13 0 11 1;
#X connect 16 0 13 0;
#X connect 17 0 7 0;
#X connect 18 0 15 0;
#X connect 18 0 23 0;
#X connect 19 0 11 0;
#X connect 20 0 17 0;
#X connect 22 0 21 0;
#X connect 23 0 22 0;
#X connect 23 0 20 0;
#X connect 24 0 25 0;
#X connect 25 0 26 0;
#X connect 26 0 2 0;
#X connect 27 0 28 0;
#X connect 27 0 19 0;

@ -0,0 +1,127 @@
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2, math, sys
# import video
from picamera.array import PiRGBArray
from picamera import PiCamera
import OSC
# from pythonosc import osc_message_builder
# from pythonosc import udp_client
import time
# MESSAGES NEED TO GO TO STDERR
print ("1.HELLO FROM PYTHON stderr", file=sys.stderr)
# def draw_flow(img, flow, step=4): # size grid
# h, w = img.shape[:2]
# y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
# fx, fy = flow[y,x].T
# lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
# lines = np.int32(lines + 0.5)
# vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# cv2.polylines(vis, lines, 0, (0, 0, 255)) # BGR
# for (x1, y1), (x2, y2) in lines:
# cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
# return vis
def send_flow0(img, flow, step=4): # size grid
    """Sample the dense flow field on a `step`-spaced grid and emit a
    "/dot" OSC message (normalized grid position) for every sampled
    sub-grid point whose motion magnitude exceeds 2 pixels.

    Uses the module-level OSC `client` set up in __main__.  Returns
    None: the visualization code at the end is commented out.
    """
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    #print "fx, fy", fx, fy
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    flines = []
    for (x1, y1), (x2, y2) in lines:
        # print ("y1", y1)
        # Only a fixed sub-grid of sample columns/rows is reported.
        if (x1 == 38 or x1 == 46 or x1 == 54 or x1 == 62 or x1 == 70 or x1 == 78 or x1 == 86 or x1 == 94 or x1 == 102 or x1 == 110 or x1 == 118) and y1 in range(38, 90, 8):
            flines.append(((x1,y1),(x2,y2)))
            # Grid coordinates normalized for the receiving pd patch.
            normx = x1 / 8 - 4
            normy = 1 - ((y1 / 8 - 4) / 3.0)
            dx = x2-x1
            dy = y2 - y1
            m = int(math.sqrt( (dx*dx) + (dy*dy) ))
            if m>2:
                print ("dot", (normx, normy), file=sys.stderr)
                msg = OSC.OSCMessage()
                msg.setAddress("/dot")
                #msg.append(dx)
                #msg.append(dy)
                #msg.append(m)
                msg.append(normx)
                msg.append(normy)
                try:
                    client.send(msg)
                except OSC.OSCClientError:
                    # Best-effort: drop the message if pd is unreachable.
                    print ("Unable to send OSC", file=sys.stderr)
    # client.send_message("/franc", m)
    # flines = np.int32(flines)
    # cv2.polylines(vis, flines, 0, (0, 40, 255)) # BGR
    # for (x1, y1), (x2, y2) in flines:
    #     cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    # return vis
    # flines = np.int32(flines)
    # cv2.polylines(vis, flines, 0, (0, 40, 255)) # BGR
    # for (x1, y1), (x2, y2) in flines:
    #     cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    # return vis
# cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])
if __name__ == '__main__':
    #data="hello"
    # client = udp_client.SimpleUDPClient("127.0.0.1", 9001)
    # connect camera
    # cam = video.create_capture("0:size=160x120") #canvas size in pixels
    print ("Starting camera", file=sys.stderr)
    cam = PiCamera()
    framesize = (160, 128)
    cam.resolution = framesize
    cam.framerate = 32
    rawCapture = PiRGBArray(cam, size=framesize)
    # allow the camera to warmup
    time.sleep(0.25)
    print ("Starting main camera loop", file=sys.stderr)
    # connect to pd
    # Init OSC: retry until pd is listening on 127.0.0.1:9001.
    while True:
        try:
            client = OSC.OSCClient()
            client.connect(('127.0.0.1', 9001)) # first argument is the IP of the host, second argument is the port to use
            break
        except OSC.OSCClientError:
            print ("Unable to connect via OSC to pd, trying again in 5", file=sys.stderr)
            time.sleep(5)
    prevgray = None
    for frame in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        img = frame.array
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # FIX: the original `if prevgray != None:` compares a numpy array
        # elementwise once prevgray holds a frame, and truth-testing the
        # resulting array raises "truth value of an array is ambiguous".
        # An identity check is the correct None test.
        if prevgray is not None:
            flow = cv2.calcOpticalFlowFarneback(prevgray, gray, 0.5, 3, 15, 3, 5, 1.2, 0)
            send_flow0(gray, flow)
        prevgray = gray
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
    # cv2.imshow('flow', )
    #ch = 0xFF & cv2.waitKey(5)
    #if ch == 27:
    #    break
    # cv2.destroyAllWindows()

@ -0,0 +1,194 @@
#!/usr/bin/env python
'''
Video capture sample.
Sample shows how VideoCapture class can be used to acquire video
frames from a camera or a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.
Usage:
video.py [--shotdir <shot path>] [source0] [source1] ...'
sourceN is an
- integer number for camera capture
- name of video file
- synth:<params> for procedural video
Synth examples:
synth:bg=../cpp/lena.jpg:noise=0.1
synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480
Keys:
ESC - exit
SPACE - save current frame to <shot path> directory
'''
import numpy as np
import cv2
from time import clock
from numpy import pi, sin, cos
import common
class VideoSynthBase(object):
    """Procedural video source mimicking the cv2.VideoCapture interface.

    Frames are rendered onto an optional background image (`bg` path),
    sized by `size` ("WxH" string) or the background's own size, with
    optional additive Gaussian noise of strength `noise`.
    """
    def __init__(self, size=None, noise=0.0, bg = None, **params):
        self.bg = None
        self.frame_size = (640, 480)   # default frame size (w, h)
        if bg is not None:
            self.bg = cv2.imread(bg, 1)
            h, w = self.bg.shape[:2]
            self.frame_size = (w, h)
        if size is not None:
            # Explicit size overrides the background's size; the
            # background is resized to match.
            w, h = map(int, size.split('x'))
            self.frame_size = (w, h)
            self.bg = cv2.resize(self.bg, self.frame_size)
        self.noise = float(noise)
    def render(self, dst):
        # Hook for subclasses: draw synthetic content onto dst in place.
        pass
    def read(self, dst=None):
        """Return (True, frame), matching cv2.VideoCapture.read()."""
        w, h = self.frame_size
        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()
        self.render(buf)
        if self.noise > 0.0:
            # Additive Gaussian noise; cv2.add saturates to uint8 range.
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf
    def isOpened(self):
        # A synthetic source is always "open".
        return True
class Chess(VideoSynthBase):
    """Synthetic animated chessboard seen through a moving virtual camera."""
    def __init__(self, **kw):
        super(Chess, self).__init__(**kw)
        w, h = self.frame_size
        self.grid_size = sx, sy = 10, 7   # board cells: columns x rows
        white_quads = []
        black_quads = []
        # Assign each unit cell to white or black by checkerboard parity.
        for i, j in np.ndindex(sy, sx):
            q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
            [white_quads, black_quads][(i + j) % 2].append(q)
        self.white_quads = np.float32(white_quads)
        self.black_quads = np.float32(black_quads)
        # Pinhole intrinsics with the principal point at the image center.
        fx = 0.9
        self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
                             [0, fx*w, 0.5*(h-1)],
                             [0.0,0.0, 1.0]])
        self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
        self.t = 0   # animation time in seconds (advances 1/30 per frame)
    def draw_quads(self, img, quads, color = (0, 255, 0)):
        # Project the 3D quads through the current camera pose and fill
        # them antialiased with sub-pixel accuracy (shift=2 -> quarter px).
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2)
    def render(self, dst):
        # Animate the camera on a wobbling orbit around the board center.
        t = self.t
        self.t += 1.0/30.0
        sx, sy = self.grid_size
        center = np.array([0.5*sx, 0.5*sy, 0.0])
        phi = pi/3 + sin(t*3)*pi/8
        c, s = cos(phi), sin(phi)
        ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
        eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
        target_pos = center + ofs
        R, self.tvec = common.lookat(eye_pos, target_pos)
        self.rvec = common.mtx2rvec(R)
        self.draw_quads(dst, self.white_quads, (245, 245, 245))
        self.draw_quads(dst, self.black_quads, (10, 10, 10))
classes = dict(chess=Chess)
presets = dict(
empty = 'synth:',
lena = 'synth:bg=../cpp/lena.jpg:noise=0.1',
chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480'
)
def create_capture(source = 0, fallback = presets['chess']):
    '''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Build a capture object from a spec string: an integer camera index,
    a filename, or "synth:..." for procedural video.  Falls back to
    `fallback` (once, without further fallback) when the requested
    source cannot be opened.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...): re-join it with the path chunk
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]
    source = chunks[0]
    try: source = int(source)
    except ValueError: pass
    # Remaining colon-separated chunks are name=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )
    cap = None
    if source == 'synth':
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print 'Warning: unable to open video source: ', source
        if fallback is not None:
            return create_capture(fallback, None)
    return cap
if __name__ == '__main__':
    import sys
    import getopt
    print __doc__
    # Optional --shotdir sets where SPACE-key snapshots are written.
    args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
    args = dict(args)
    shotdir = args.get('--shotdir', '.')
    if len(sources) == 0:
        sources = [ 0 ]
    caps = map(create_capture, sources)
    shot_idx = 0
    # Display every source; ESC (27) quits, SPACE saves one BMP per source.
    while True:
        imgs = []
        for i, cap in enumerate(caps):
            ret, img = cap.read()
            imgs.append(img)
            cv2.imshow('capture %d' % i, img)
        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
        if ch == ord(' '):
            for i, img in enumerate(imgs):
                fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
                cv2.imwrite(fn, img)
                print fn, 'saved'
            shot_idx += 1
    cv2.destroyAllWindows()

Binary file not shown.

@ -33,10 +33,10 @@ print "Content-type: text/html"
print "" print ""
print Template(u"""<html> print Template(u"""<html>
<head> <head>
<title>RECORD O RAMA</title> <title>ADOPT A WALK</title>
<style type="text/css"> <style type="text/css">
div.movie { div.movie {
border: 5px solid pink; border: 20px solid black;
display: inline-block; display: inline-block;
} }
div.movie img { div.movie img {
@ -46,7 +46,10 @@ div.movie img {
</style> </style>
</head> </head>
<body> <body>
<header><img src="screen.png" alt="" /></header> <header>
<p>head<p>
<img src="../images/header.png" width="100%"/>
</header>
{% for i in items %} {% for i in items %}
<div class="movie"><a href="../clips/{{i.mp4}}"><img src="../clips/{{i.jpg}}" /></a> </div> <div class="movie"><a href="../clips/{{i.mp4}}"><img src="../clips/{{i.jpg}}" /></a> </div>

Binary file not shown.

After

Width:  |  Height:  |  Size: 499 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

@ -22,7 +22,7 @@ def draw(flow):
f[...,0] = 0 #np.minimum(v*10, 255) f[...,0] = 0 #np.minimum(v*10, 255)
f[...,1] = 0 f[...,1] = 0
f[...,2] = 255- np.minimum(v**2, 255) #ang*(180/np.pi/2) f[...,2] = 255- np.minimum(v**3, 255) #ang*(180/np.pi/2)
bgr = cv2.cvtColor(f, cv2.COLOR_HSV2BGR) bgr = cv2.cvtColor(f, cv2.COLOR_HSV2BGR)
return bgr return bgr
@ -35,7 +35,7 @@ p.add_argument("--height", type=int, default=480, help="pre-detect resize height
p.add_argument("--fourcc", default="XVID", help="MJPG,mp4v,XVID") p.add_argument("--fourcc", default="XVID", help="MJPG,mp4v,XVID")
p.add_argument("--framerate", type=float, default=25, help="output frame rate") p.add_argument("--framerate", type=float, default=25, help="output frame rate")
p.add_argument("--show", default=False, action="store_true") p.add_argument("--show", default=False, action="store_true")
p.add_argument("--time", type=float, default=None) p.add_argument("--frames", type=int, default=100)
args = p.parse_args() args = p.parse_args()
fourcc = None fourcc = None
@ -59,9 +59,8 @@ while True:
if prevgray.shape == (args.height, args.width): if prevgray.shape == (args.height, args.width):
break break
count = 0
try: try:
if args.time != None:
start = time.time()
while True: while True:
ret, frame = cam.read() ret, frame = cam.read()
@ -72,18 +71,19 @@ try:
if out != None: if out != None:
out.write(frame) out.write(frame)
count += 1
if args.show: if args.show:
cv2.imshow('display', frame) cv2.imshow('display', frame)
if cv2.waitKey(5) & 0xFF == ord('q'): if cv2.waitKey(5) & 0xFF == ord('q'):
break break
if args.time != None: if args.frames != None:
elapsed = time.time() - start if (count >= args.frames):
if (elapsed >= args.time):
break break
except KeyboardInterrupt: except KeyboardInterrupt:
pass pass
print ("\nCleaning up...") print ("\nCleaning up... Wrote", count, "frames")
if out: if out:
out.release() out.release()
if args.show: if args.show:

@ -31,6 +31,7 @@ if args.output:
else: else:
out = None out = None
count=0
try: try:
if args.time != None: if args.time != None:
start = time.time() start = time.time()
@ -38,6 +39,7 @@ try:
ret, frame = cam.read() ret, frame = cam.read()
if out != None: if out != None:
out.write(frame) out.write(frame)
count += 1
if args.show: if args.show:
cv2.imshow('display', frame) cv2.imshow('display', frame)
if cv2.waitKey(5) & 0xFF == ord('q'): if cv2.waitKey(5) & 0xFF == ord('q'):
@ -50,7 +52,7 @@ try:
except KeyboardInterrupt: except KeyboardInterrupt:
pass pass
print ("\nCleaning up...") print ("\nCleaning up... Wrote", count, "frames")
if out: if out:
out.release() out.release()
if args.show: if args.show:

@ -3,31 +3,85 @@ espeak "Gait analysis number one." -v en
sleep 1 sleep 1
espeak "Please state your name:" -v en espeak "Please state your name:" -v en
sleep 1
espeak "Position yourself 2 to 3 meters away from the Tetra Gamma Circulaire." -v en
sleep 2 sleep 2
espeak "Walk towards the Tetra Gamma Circulaire in a straight line ." -v en
sleep 0.2
play sweep_up.wav
basename=clips/$(date +%Y-%m-%d-%H-%M-%S) basename=clips/$(date +%Y-%m-%d-%H-%M-%S)
echo recording $basename.avi... echo recording $basename.avi...
scripts/black2.py --output $basename.avi --time 3 scripts/black2.py --output $basename.avi --frames 50 --framerate 4 --width 320 --height 240
# convert to mp4 # convert to mp4
ffmpeg -i $basename.avi -y $basename.mp4 ffmpeg -i $basename.avi -y $basename.mp4
# make a thumnail image # make a thumnail image
ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg
rm $basename.avi # rm $basename.avi
espeak "ciao ciao halo" -v en play sweep_up.wav
sleep 2
espeak "Position yourself one meter away to the left of the Tetra Gamma Circulaire." -v en
sleep 1
espeak "Walk from left to right in front of the Tetra Gamma Circulaire.
" -v en
sleep 0.2
play sweep_up.wav
basename=clips/$(date +%Y-%m-%d-%H-%M-%S)
echo recording $basename.avi...
scripts/black2.py --output $basename.avi --frames 50 --framerate 4 --width 320 --height 240
# convert to mp4
ffmpeg -i $basename.avi -y $basename.mp4
# make a thumnail image
ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg
# rm $basename.avi
play sweep_up.wav
espeak "Turn your back to the Tetra Gamma Circulaire." -v en
sleep 1
espeak "Walk away from the Tetra Gamma Circulaire.
" -v en
sleep 0.2
play sweep_up.wav
basename=clips/$(date +%Y-%m-%d-%H-%M-%S) basename=clips/$(date +%Y-%m-%d-%H-%M-%S)
echo recording $basename.avi... echo recording $basename.avi...
scripts/black2.py --output $basename.avi --time 3 scripts/black2.py --output $basename.avi --frames 50 --framerate 4 --width 320 --height 240
# convert to mp4 # convert to mp4
ffmpeg -i $basename.avi -y $basename.mp4 ffmpeg -i $basename.avi -y $basename.mp4
# make a thumnail image # make a thumnail image
ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg
rm $basename.avi # rm $basename.avi
play sweep_up.wav
espeak "Position yourself 2 to 3 meters away from the Tetra Gamma Circulaire." -v en
sleep 1
espeak "Walk towards the Tetra Gamma Circulaire on a zig zag line.
" -v en
sleep 0.2
play sweep_up.wav
basename=clips/$(date +%Y-%m-%d-%H-%M-%S)
echo recording $basename.avi...
scripts/black2.py --output $basename.avi --frames 50 --framerate 4 --width 320 --height 240
# convert to mp4
ffmpeg -i $basename.avi -y $basename.mp4
# make a thumnail image
ffmpeg -i $basename.avi -vframes 1 -ss 0.5 -y $basename.jpg
# rm $basename.avi
play sweep_up.wav
# subprocess.call(["espeak", "Please state your name:", "-v", "en"]) # subprocess.call(["espeak", "Please state your name:", "-v", "en"])
# sleep(2) # sleep(2)

Binary file not shown.

@ -6,7 +6,7 @@ FLOPPY="/media/floppy"
MAINPY="${FLOPPY}/main.py" MAINPY="${FLOPPY}/main.py"
PYRUN="python ${MAINPY}" PYRUN="python ${MAINPY}"
MAINPD="${FLOPPY}/main.pd" MAINPD="${FLOPPY}/main.pd"
PDRUN="pd -oss -r 48000 -rt -nogui ${MAINPD}" PDRUN="pd -lib import -path /usr/local/lib/pd-externals/net/ -path /usr/local/lib/pd-externals/osc/ -oss -r 48000 -rt -nogui ${MAINPD}"
stdbuf -oL -- udevadm monitor --udev -p ${FD} | while read -r -- STATE _ _ _ _ stdbuf -oL -- udevadm monitor --udev -p ${FD} | while read -r -- STATE _ _ _ _
do do
Loading…
Cancel
Save