first
parent ee04c495b5
commit 4b60dcc059
Binary file not shown.
@@ -0,0 +1 @@
FLOPPYLEFT - 2017
@@ -0,0 +1,7 @@
Author: Slavoj Žižek
Date: 1989
Title: The Sublime Object of Floppy

Description:

And so on, and so on, and so on.
@@ -0,0 +1,217 @@
#!/usr/bin/env python

'''
This module contains some common routines used by other samples.
'''

import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it

image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']

class Bunch(object):
    def __init__(self, **kw):
        self.__dict__.update(kw)
    def __str__(self):
        return str(self.__dict__)

def splitfn(fn):
    path, fn = os.path.split(fn)
    name, ext = os.path.splitext(fn)
    return path, name, ext

def anorm2(a):
    return (a*a).sum(-1)
def anorm(a):
    return np.sqrt( anorm2(a) )

def homotrans(H, x, y):
    xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
    ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
    s  = H[2, 0]*x + H[2, 1]*y + H[2, 2]
    return xs/s, ys/s

def to_rect(a):
    a = np.ravel(a)
    if len(a) == 2:
        a = (0, 0, a[0], a[1])
    return np.array(a, np.float64).reshape(2, 2)

def rect2rect_mtx(src, dst):
    src, dst = to_rect(src), to_rect(dst)
    cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
    tx, ty = dst[0] - src[0] * (cx, cy)
    M = np.float64([[ cx,  0, tx],
                    [  0, cy, ty],
                    [  0,  0,  1]])
    return M


def lookat(eye, target, up = (0, 0, 1)):
    fwd = np.asarray(target, np.float64) - eye
    fwd /= anorm(fwd)
    right = np.cross(fwd, up)
    right /= anorm(right)
    down = np.cross(fwd, right)
    R = np.float64([right, down, fwd])
    tvec = -np.dot(R, eye)
    return R, tvec

def mtx2rvec(R):
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0]    # same as np.dot(R, vt[0])
    c = np.dot(vt[0], p)
    s = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(s, c)

def draw_str(dst, (x, y), s):
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)

class Sketcher:
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)

    def show(self):
        cv2.imshow(self.windowname, self.dests[0])

    def on_mouse(self, event, x, y, flags, param):
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            self.prev_pt = None


# palette data from matplotlib/_cm.py
_jet_data =   {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                         (1, 0.5, 0.5)),
               'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                         (0.91,0,0), (1, 0, 0)),
               'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                         (1, 0, 0))}

cmap_data = { 'jet' : _jet_data }

def make_cmap(name, n=256):
    data = cmap_data[name]
    xs = np.linspace(0.0, 1.0, n)
    channels = []
    eps = 1e-6
    for ch_name in ['blue', 'green', 'red']:
        ch_data = data[ch_name]
        xp, yp = [], []
        for x, y1, y2 in ch_data:
            xp += [x, x+eps]
            yp += [y1, y2]
        ch = np.interp(xs, xp, yp)
        channels.append(ch)
    return np.uint8(np.array(channels).T*255)

def nothing(*arg, **kw):
    pass

def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print msg, '...',
    start = clock()
    try:
        yield
    finally:
        print "%.2f ms" % ((clock()-start)*1000)

class StatValue:
    def __init__(self, smooth_coef = 0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0-c) * v

class RectSelector:
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                xo, yo = self.drag_start
                x0, y0 = np.minimum([xo, yo], [x, y])
                x1, y1 = np.maximum([xo, yo], [x, y])
                self.drag_rect = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.drag_rect = (x0, y0, x1, y1)
            else:
                rect = self.drag_rect
                self.drag_start = None
                self.drag_rect = None
                if rect:
                    self.callback(rect)
    def draw(self, vis):
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
        return True
    @property
    def dragging(self):
        return self.drag_rect is not None


def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    args = [iter(iterable)] * n
    return it.izip_longest(fillvalue=fillvalue, *args)

def mosaic(w, imgs):
    '''Make a grid from images.

    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    img0 = imgs.next()
    pad = np.zeros_like(img0)
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))

def getsize(img):
    h, w = img.shape[:2]
    return w, h

def mdot(*args):
    return reduce(np.dot, args)

def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    for kp in keypoints:
        x, y = kp.pt
        cv2.circle(vis, (int(x), int(y)), 2, color)
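The grouper/mosaic docstrings above describe tiling same-sized images into a grid; the following is a minimal usage sketch (not part of the commit), assuming Python 2 with OpenCV 2.x as used throughout this repository. The frame sizes, grey values and window name are illustrative only.

# Usage sketch for common.mosaic and common.draw_str (illustrative only).
import numpy as np
import cv2
import common

# Three dummy 160x120 grayscale frames tiled into a 2-column grid;
# the missing fourth cell is padded with zeros by mosaic().
frames = [np.full((120, 160), v, np.uint8) for v in (0, 127, 255)]
grid = common.mosaic(2, frames)

vis = cv2.cvtColor(grid, cv2.COLOR_GRAY2BGR)
common.draw_str(vis, (10, 20), 'mosaic of 3 frames')
cv2.imshow('grid', vis)
cv2.waitKey(0)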
Binary file not shown.
@@ -0,0 +1,66 @@
#N canvas 136 23 897 545 10;
#X floatatom 166 153 5 0 0 0 - - -;
#X floatatom 282 159 5 0 0 0 - - -;
#X obj 54 -92 tgl 15 0 empty empty empty 17 7 0 10 -262130 -1 -1 1
1;
#X msg 54 -54 \; pd dsp \$1;
#X obj 267 -7 OSC/unpackOSC;
#X obj 267 -46 iemnet/udpreceive 9001;
#X obj 267 33 OSC/routeOSC /dot;
#X obj 190 121 unpack f f;
#N canvas 0 22 450 278 (subpatch) 0;
#X array waveform 11 float 1;
#A 0 -1 -1 -1 -1 -1 -1 -1 -0.333333 0 0.333333 -1;
#X coords 0 1 10 -1 200 140 1 0 0;
#X restore 494 188 graph;
#X obj 224 247 tabwrite waveform;
#X obj 264 189 t b f;
#X obj 225 220 f;
#X obj 198 354 tabread4~ waveform;
#X obj 232 401 dac~;
#X obj 488 116 tabread waveform;
#X obj 543 23 + 1;
#X obj 505 21 i;
#X obj 504 -39 tgl 15 0 empty empty empty 17 7 0 10 -262144 -1 -1 1
1;
#X msg 534 78 0;
#X floatatom 483 72 5 0 0 0 - - -;
#X floatatom 424 254 5 0 0 0 - - -;
#X obj 535 54 select 10;
#X obj 204 325 *~ 10;
#X obj 416 207 + 1;
#X obj 506 -9 metro 100;
#X obj 196 295 osc~;
#X obj 364 370 line~;
#X msg 364 342 \$1 10;
#X obj 337 295 expr 100 + (100 * $f1);
#X connect 2 0 3 0;
#X connect 4 0 6 0;
#X connect 5 0 4 0;
#X connect 6 0 7 0;
#X connect 7 0 0 0;
#X connect 7 0 10 0;
#X connect 7 1 1 0;
#X connect 7 1 11 1;
#X connect 10 0 11 0;
#X connect 10 1 9 1;
#X connect 11 0 9 0;
#X connect 12 0 13 0;
#X connect 12 0 13 1;
#X connect 14 0 23 0;
#X connect 15 0 16 1;
#X connect 16 0 15 0;
#X connect 16 0 19 0;
#X connect 16 0 14 0;
#X connect 16 0 21 0;
#X connect 17 0 24 0;
#X connect 18 0 16 1;
#X connect 21 0 18 0;
#X connect 22 0 12 0;
#X connect 23 0 20 0;
#X connect 23 0 28 0;
#X connect 24 0 16 0;
#X connect 25 0 22 0;
#X connect 27 0 26 0;
#X connect 28 0 27 0;
#X connect 28 0 25 0;
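For orientation: the patch above listens on UDP port 9001 (iemnet/udpreceive into OSC/unpackOSC) and routes /dot messages; the two floats of each message become, respectively, an index into the 11-point waveform array and the value written there (via t b f, f and tabwrite). Below is a hypothetical standalone sender that exercises this route, using the same pyOSC calls as the tracking script further down; the swept values are illustrative.

# Hypothetical test sender for the /dot route of the patch above.
# Assumes pyOSC is installed and the patch is listening on localhost:9001.
import time
import OSC

client = OSC.OSCClient()
client.connect(('127.0.0.1', 9001))

# Sweep the 11 table slots, writing a ramp from -1.0 to 1.0 into 'waveform'.
for i in range(11):
    msg = OSC.OSCMessage()
    msg.setAddress('/dot')
    msg.append(i)               # table index, 0..10
    msg.append(-1.0 + i * 0.2)  # value written at that index
    client.send(msg)
    time.sleep(0.05)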
@@ -0,0 +1,156 @@
#!/usr/bin/env python

import numpy as np
import cv2, math
import video

help_message = '''
USAGE: opt_flow.py [<video_source>]

Keys:
 1 - toggle HSV flow visualization
 2 - toggle glitch

'''

# def draw_flow(img, flow, step=4): # size grid
#     h, w = img.shape[:2]
#     y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
#     fx, fy = flow[y,x].T
#     lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
#     lines = np.int32(lines + 0.5)
#     vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
#     cv2.polylines(vis, lines, 0, (0, 0, 255)) # BGR
#     for (x1, y1), (x2, y2) in lines:
#         cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
#     return vis

import OSC
# from pythonosc import osc_message_builder
# from pythonosc import udp_client
import time

def send_flow0(img, flow, step=4): # size grid
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    #print "fx, fy", fx, fy
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)


    flines = []
    for (x1, y1), (x2, y2) in lines:
        # print ("y1", y1)
        if (x1 == 38 or x1 == 46 or x1 == 54 or x1 == 62 or x1 == 70 or x1 == 78 or x1 == 86 or x1 == 94 or x1 == 102 or x1 == 110 or x1 == 118) and y1 in range(38, 90, 8):
            flines.append(((x1,y1),(x2,y2)))
            normx = x1 / 8 - 4
            normy = 1 - ((y1 / 8 - 4) / 3.0)
            dx = x2-x1
            dy = y2 - y1
            m = int(math.sqrt( (dx*dx) + (dy*dy) ))
            if m>2:
                print ("dot", (normx, normy))
                msg = OSC.OSCMessage()
                msg.setAddress("/dot")
                #msg.append(dx)
                #msg.append(dy)
                #msg.append(m)
                msg.append(normx)
                msg.append(normy)
                client.send(msg)
                # client.send_message("/franc", m)


    # for (x1, y1), (x2, y2) in lines:
    #     # print ("y1", y1)
    #     if (y1 == 38 or y1 == 46 or y1 == 54 or y1 == 70 or y1 == 86) and x1 in range(38, 118, 8):
    #         flines.append(((x1,y1),(x2,y2)))
    #         dx = x2-x1
    #         dy = y2 - y1
    #         m = int(math.sqrt( (dx*dx) + (dy*dy) ))
    #         if m>2:
    #             print ("x", (dx, dy, m, x1, y1))
    #             msg = OSC.OSCMessage()
    #             msg.setAddress("/x")
    #             msg.append(dx)
    #             msg.append(dy)
    #             msg.append(m)
    #             msg.append(x1)
    #             msg.append(y1)
    #             client.send(msg)


    # Here goes BPM




    # for (x1, y1), (x2, y2) in lines:
    #     # print ("y1", y1)
    #     if (y1 == 10 or y1 == 110) and x1 in range(90, 150, 4):
    #         flines.append(((x1,y1),(x2,y2)))
    #         dx = x2-x1
    #         dy = y2 - y1
    #         m = int(math.sqrt( (dx*dx) + (dy*dy) ))
    #         if m>2:
    #             print ("l", (dx, dy, m, x1, y1))
    #             msg = OSC.OSCMessage()
    #             msg.setAddress("/left")
    #             msg.append(dx)
    #             msg.append(dy)
    #             msg.append(m)
    #             msg.append(x1)
    #             msg.append(y1)
    #             client.send(msg)


    flines = np.int32(flines)
    cv2.polylines(vis, flines, 0, (0, 40, 255)) # BGR
    for (x1, y1), (x2, y2) in flines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

    flines = np.int32(flines)
    cv2.polylines(vis, flines, 0, (0, 40, 255)) # BGR
    for (x1, y1), (x2, y2) in flines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

# cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]])

if __name__ == '__main__':
    import sys
    print help_message
    try: fn = sys.argv[1]
    except: fn = 0


    # connect to pd
    # Init OSC
    client = OSC.OSCClient()
    client.connect(('127.0.0.1', 9001)) # first argument is the IP of the host, second argument is the port to use
    #data="hello"
    # client = udp_client.SimpleUDPClient("127.0.0.1", 9001)

    # connect camera
    # cam = video.create_capture(fn)
    cam = video.create_capture("0:size=160x120") #canvas size in pixels
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    cur_glitch = prev.copy()

    while True:
        # print "GRAB FRAME"
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray

        cv2.imshow('flow', send_flow0(gray, flow))

        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
    cv2.destroyAllWindows()
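The grid arithmetic in send_flow0 relies on Python 2 integer division: the sampled columns x1 = 38, 46, ..., 118 collapse to normx = 0..10 (matching the 11 slots of the Pd waveform table) and the sampled rows y1 = 38..86 to normy between 1.0 and -1.0 (matching the table's value range). Below is a small standalone check of that mapping (Python 2, no OpenCV needed); it is illustrative only and not part of the commit.

# Tabulate the coordinate mapping used in send_flow0 above.
for x1 in range(38, 119, 8):          # the 11 sampled columns
    normx = x1 / 8 - 4                # integer division: 0, 1, ..., 10
    print 'x1=%3d -> normx=%d' % (x1, normx)

for y1 in range(38, 90, 8):           # the 7 sampled rows
    normy = 1 - ((y1 / 8 - 4) / 3.0)  # 1.0 down to -1.0
    print 'y1=%3d -> normy=%+.2f' % (y1, normy)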
@@ -0,0 +1 @@
GREAT JOB!
@@ -0,0 +1,194 @@
#!/usr/bin/env python

'''
Video capture sample.

Sample shows how the VideoCapture class can be used to acquire video
frames from a camera or a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).

'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.

Usage:
    video.py [--shotdir <shot path>] [source0] [source1] ...

    sourceN is an
     - integer number for camera capture
     - name of video file
     - synth:<params> for procedural video

Synth examples:
    synth:bg=../cpp/lena.jpg:noise=0.1
    synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480

Keys:
    ESC    - exit
    SPACE  - save current frame to <shot path> directory

'''

import numpy as np
import cv2
from time import clock
from numpy import pi, sin, cos
import common

class VideoSynthBase(object):
    def __init__(self, size=None, noise=0.0, bg = None, **params):
        self.bg = None
        self.frame_size = (640, 480)
        if bg is not None:
            self.bg = cv2.imread(bg, 1)
            h, w = self.bg.shape[:2]
            self.frame_size = (w, h)

        if size is not None:
            w, h = map(int, size.split('x'))
            self.frame_size = (w, h)
            self.bg = cv2.resize(self.bg, self.frame_size)

        self.noise = float(noise)

    def render(self, dst):
        pass

    def read(self, dst=None):
        w, h = self.frame_size

        if self.bg is None:
            buf = np.zeros((h, w, 3), np.uint8)
        else:
            buf = self.bg.copy()

        self.render(buf)

        if self.noise > 0.0:
            noise = np.zeros((h, w, 3), np.int8)
            cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
            buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
        return True, buf

    def isOpened(self):
        return True

class Chess(VideoSynthBase):
    def __init__(self, **kw):
        super(Chess, self).__init__(**kw)

        w, h = self.frame_size

        self.grid_size = sx, sy = 10, 7
        white_quads = []
        black_quads = []
        for i, j in np.ndindex(sy, sx):
            q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
            [white_quads, black_quads][(i + j) % 2].append(q)
        self.white_quads = np.float32(white_quads)
        self.black_quads = np.float32(black_quads)

        fx = 0.9
        self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
                             [0, fx*w, 0.5*(h-1)],
                             [0.0,0.0,      1.0]])

        self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
        self.t = 0

    def draw_quads(self, img, quads, color = (0, 255, 0)):
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2)

    def render(self, dst):
        t = self.t
        self.t += 1.0/30.0

        sx, sy = self.grid_size
        center = np.array([0.5*sx, 0.5*sy, 0.0])
        phi = pi/3 + sin(t*3)*pi/8
        c, s = cos(phi), sin(phi)
        ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
        eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
        target_pos = center + ofs

        R, self.tvec = common.lookat(eye_pos, target_pos)
        self.rvec = common.mtx2rvec(R)

        self.draw_quads(dst, self.white_quads, (245, 245, 245))
        self.draw_quads(dst, self.black_quads, (10, 10, 10))


classes = dict(chess=Chess)

presets = dict(
    empty = 'synth:',
    lena = 'synth:bg=../cpp/lena.jpg:noise=0.1',
    chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480'
)


def create_capture(source = 0, fallback = presets['chess']):
    '''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    try: source = int(source)
    except ValueError: pass
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print 'Warning: unable to open video source: ', source
        if fallback is not None:
            return create_capture(fallback, None)
    return cap

if __name__ == '__main__':
    import sys
    import getopt

    print __doc__

    args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
    args = dict(args)
    shotdir = args.get('--shotdir', '.')
    if len(sources) == 0:
        sources = [ 0 ]

    caps = map(create_capture, sources)
    shot_idx = 0
    while True:
        imgs = []
        for i, cap in enumerate(caps):
            ret, img = cap.read()
            imgs.append(img)
            cv2.imshow('capture %d' % i, img)
        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
        if ch == ord(' '):
            for i, img in enumerate(imgs):
                fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
                cv2.imwrite(fn, img)
                print fn, 'saved'
            shot_idx += 1
    cv2.destroyAllWindows()
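The module docstring above documents synth:<params> sources and the fallback behaviour of create_capture; the following is a minimal usage sketch (not part of the commit), assuming Python 2 / OpenCV 2.x with this repository's video.py and common.py on the path. It deliberately uses only the class and noise parameters so no background image path is required.

# Usage sketch for video.create_capture with a procedural source.
import cv2
import video

# Procedural chessboard frames with a little added noise.
cap = video.create_capture('synth:class=chess:noise=0.1')

ret, frame = cap.read()
if ret:
    cv2.imshow('synth frame', frame)
    cv2.waitKey(0)
cv2.destroyAllWindows()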
Binary file not shown.