motion detector sensitivity corrected, some comments added

master
acastro 5 years ago
parent b99136a813
commit e0ff1654dd

@ -1,5 +1,5 @@
#!/usr/bin/env python
# defines the appearance of the LEDs, such as the bouncing and circular effects
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
import time
@ -17,6 +17,7 @@ pixels = Adafruit_WS2801.WS2801Pixels(PIXEL_COUNT, spi=SPI.SpiDev(SPI_PORT, SPI_
color_guru = [146, 200, 0] # purple
color_pirate = [255, 0, 200] # yellow
color_announcer= [0, 100, 200] # aqua blue
color_activation= [255, 255, 255] # white
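# note: the colour names describe what the strip shows; they line up with the values only if this strip's channel order is R,B,G rather than R,G,B (an assumption, not verified here)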
def leds_color_intensity(color, intensity):
# color: a list of 3 rgb components 0 to 255
@ -31,7 +32,7 @@ def leds_color_intensity(color, intensity):
int(g*intensity),
int(b*intensity) ) # Set the RGB color (0-255) of pixel i
pixels.show()
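# For reference, a minimal sketch of the full function as reconstructed from
# the fragments in this diff; the r/g/b unpacking mirrors the other functions
# in this file, and the name is suffixed to avoid shadowing the real definition:
def leds_color_intensity_sketch(color, intensity):
    r = color[0]
    g = color[1]
    b = color[2]
    for i in range(PIXEL_COUNT):
        pixels.set_pixel_rgb(i,
                             int(r*intensity),
                             int(g*intensity),
                             int(b*intensity)) # Set the RGB color (0-255) of pixel i
    pixels.show()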
# LEDs grow and shrink
@ -52,7 +53,7 @@ def leds_color_length(color, intensity):
pixels.show()
pixels.clear()
# LEDs for pirate
def leds_pirate_blink(color):
@ -62,20 +63,20 @@ def leds_pirate_blink(color):
g = color[1]
b = color[2]
while True:
pixels.clear()
pixels.show()
time.sleep(0.02)
for i in range(PIXEL_COUNT):
pixels.set_pixel_rgb(i,
int(r),
int(g),
int(b) ) # Set the RGB color (0-255) of pixel i
pixels.show()
time.sleep(0.01)
def leds_pirate_circle(color):
# color: a list of 3 rgb components 0 to 255
@ -96,8 +97,8 @@ def leds_pirate_circle(color):
pixels.clear()
pixels.show()
time.sleep(0.3)
def leds_pirate_bounce(color):
# color: a list of 3 rgb components 0 to 255
# pixN: number of pixels
@ -105,10 +106,10 @@ def leds_pirate_bounce(color):
g = color[1]
b = color[2]
leds = list(range(PIXEL_COUNT))+ (list(range(PIXEL_COUNT))[::-1])
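# e.g. with PIXEL_COUNT = 4, leds = [0, 1, 2, 3, 3, 2, 1, 0]: the indices run forward then back, producing the bounce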
n = 0
while True:
n = n + 1
for i in leds:
pixels.set_pixel_rgb(i,
int(r),
int(g),
@ -122,10 +123,30 @@ def leds_pirate_bounce(color):
if n > 3:
break
# activation mode is a single white LED bounce
def leds_activation(color):
# color: a list of 3 rgb components 0 to 255
# pixN: number of pixels
r = color[0]
g = color[1]
b = color[2]
leds = list(range(PIXEL_COUNT))+ (list(range(PIXEL_COUNT))[::-1])
n = 0
while True:
n = n + 1
for i in leds:
pixels.set_pixel_rgb(i,
int(r),
int(g),
int(b) ) # Set the RGB color (0-255) of pixel i
pixels.show()
time.sleep(0.01)
pixels.clear()
pixels.show()
time.sleep(0.1)
pixels.clear() # Clear all the pixels to turn them off.
if n > 0:
break
pixels.clear() # Clear all the pixels to turn them off.

@ -1,18 +1,121 @@
## IRIS
###### WELCOME TO THE README PAGE OF IRIS 0.5 ######
Iris 0.5 is a smart machine that interacts with you via audio, camera and visual effects. It is meant to be installed in a workplace environment. Iris has 3 characters: guru, pirate and announcer. The files follow this naming convention.
## THE FILES ##
* The .json files (“guru.json”, “rebel.json” and “announcer.json”) store all the sentences the characters say.
* The script “guru-pirate.py” combines the content of guru and pirate (from the json files) to play their messages. It also drives the LEDs: when the characters speak, the LEDs light up and perform effects. This script runs when the camera detects motion.
* The script "motion_detector_2.py" detects motion from the camera connected to the Raspberry Pi.
* The script "announcements.py" plays the messages of the Announcer (from its json file).
* The credits for this project live in the script “colophon.py”; they are read out loud with espeak when the camera is covered with a finger for a few seconds.
* “motion.sh” is the script that brings everything to life. Just run ./motion.sh in your terminal.
## INSTALL DEPENDENCIES ##
* 2018-12-12
* Performed on Debian Linux
* Raspberry Pi 3 B+
* Pi Camera v2.1
* LEDs — WS2801B RGB LED strip (colour effects)
# 1 Install Pip #
sudo apt-get update
sudo apt-get install python-pip
# 2 Check python2 version #
python --version
# 3 Check pip version #
pip --version
# 4 Properly install Setuptools module #
sudo apt-get install python-dev python-setuptools
# 5 Install OpenCV with pip #
sudo pip install opencv-python
# 6 Install imutils with pip #
sudo pip install imutils
# 7 Install Pillow with pip #
sudo pip install Pillow
# 8 Install espeak (to play the pirate) #
sudo apt-get install espeak
# 9 Install alsa-utils, which provides aplay (to play the colophon) #
sudo apt-get install alsa-utils
# 10 Install sox (to play the colophon) #
sudo apt-get install sox
## RUN THE PROGRAM ON RASPBERRY PI AT STARTUP ##
Tutorial on how to run a program on your Raspberry Pi at startup:
https://www.dexterindustries.com/howto/run-a-program-on-your-raspberry-pi-at-startup/
The fourth method to run a program on your Raspberry Pi at startup is to use a systemd unit file. systemd provides a standard process for controlling which programs run when a Linux system boots. Note that systemd is available only from the Jessie version of Raspbian onwards.
— Step 1: Create A Unit File
Open a new unit file using the command shown below:
sudo nano /lib/systemd/system/sample.service
Add in the following text:
[Unit]
Description=My Sample Service
After=multi-user.target
[Service]
Type=idle
ExecStart=/usr/bin/python /home/pi/sample.py
[Install]
WantedBy=multi-user.target
Save and exit the nano editor.
This defines a new service called “Sample Service”, requesting that it be launched once the multi-user environment is available. The “ExecStart” parameter specifies the command we want to run. The “Type” is set to “idle” to ensure that the ExecStart command runs only after everything else has loaded. Note that the paths are absolute and define the complete location of Python as well as the location of our Python script.
In order to store the script's text output in a log file, change the ExecStart line to run the command through a shell, since systemd itself does not interpret the > redirection:
ExecStart=/bin/sh -c '/usr/bin/python /home/pi/sample.py > /home/pi/sample.log 2>&1'
The permission on the unit file needs to be set to 644:
sudo chmod 644 /lib/systemd/system/sample.service
— Step 2: Configure systemd
Now that the unit file has been defined, we can tell systemd to start it during the boot sequence:
sudo systemctl daemon-reload
sudo systemctl enable sample.service
Reboot the Pi and your custom service should run:
sudo reboot
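After the reboot, you can confirm that the service started (a quick check, using the unit name from Step 1):
sudo systemctl status sample.service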
## LOCATION
* scripts location: /var/www/html/lifeHackAgent
* audio recordings: /var/www/html/lifeHackAgent/Audio_recordings
## ORIGINAL FILES
* guru-pirate: /var/www/html/lifeHackAgent/guru-pirate.py
* announcer: /var/www/html/lifeHackAgent/announcer.py
* colophon: /var/www/html/lifeHackAgent/colophon/
## TEST SYSTEM:
* `./motion.sh`
## CRONJOB:
runs announcements.py
## systemd service file
* run on boot
* location: /lib/systemd/system/lifehack.service
* status: sudo systemctl status lifehack.service
* start: sudo systemctl start lifehack.service
* stop: sudo systemctl stop lifehack.service
####################################################################################################
# Iris Version 0.5 Contributors: Gill Baldwin, Simon Browne, Tancredi Di Giovanni, Paloma García, Rita Graça, Artemis Gryllaki, Pedro Sá Couto, Biyi Wen, Bohye Woo, Silvio Lorusso, Aymeric Mansoux, André Castro, Steve Rushton, Michael Murtaugh, Leslie Robbins. Produced and published by the Experimental Publishing (XPUB) program of the Piet Zwart Institute, Rotterdam, December 2018. A collaboration between the Research Department of Het Nieuwe Instituut and XPUB.
# You can find the IRIS launch at: https://burnout.hetnieuweinstituut.nl/en/activities/life-hacks-introducing-iris
####################################################################################################

@ -4,6 +4,9 @@ from pprint import pprint
from time import sleep
from LEDfunctions import *
# this script plays the content of the announcer (from json file)
# when characters speak the LEDs light up and perform effects
def vu_2_leds(color):
while True:
data = play_process.stdout.readline()
@ -11,7 +14,7 @@ def vu_2_leds(color):
pixels.clear() # make LEDs dark
pixels.show()
break
data = data.rstrip()
if data.endswith("%"):
vu = float(data[:-1][-3:])/100 # 0-100
leds_color_intensity(color, vu)
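# Worked example of the parsing above, on a hypothetical line from aplay's
# --vumeter=mono output:
#   data = "Mono: 45%"
#   data[:-1]          -> "Mono: 45"  (drop the trailing %)
#   data[:-1][-3:]     -> " 45"       (last three characters)
#   float(" 45") / 100 -> 0.45        (LED intensity between 0 and 1)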
@ -23,26 +26,26 @@ announcer_f = open(pwd + "announcer.json", "r").read()
announcer = json.loads(announcer_f)
# print(announcer)
position_file = open(pwd + "announcements_position.txt", "r")
#choosing next message in sequence
msg_sf_len=len( announcer['messages'] )
position= (int( position_file.read() ) ) + 1
if position == msg_sf_len:
position = 0
print(position)
position_file = open(pwd + "announcements_position.txt", "w")
position_file.write(str(position))
position_file.close()
print ("position", position)
# ANNOUNCER PART
# msg = random.choice( announcer['messages'] ) #choosing a random intro.
sound_dir = pwd + "Audio_recordings/Announcements/"
intro_sf = sound_dir + random.choice( announcer['introductions'] )
msg_sf = sound_dir + announcer['messages'][position]
print(intro_sf, msg_sf)
@ -50,11 +53,8 @@ print(intro_sf, msg_sf)
#play audio
print(intro_sf)
#play_process = subprocess.Popen(["aplay", intro_sf, "-f", "cd", "--vumeter=mono"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
os.system('play -q "{}" gain 5'.format(intro_sf) )
play_process = subprocess.Popen(["aplay", msg_sf, "-f", "cd", "--vumeter=mono"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
vu_2_leds(color_announcer)
#os.system('play -q "{}" gain 1'.format(msg_sf) )

@ -1,87 +1,76 @@
# import the necessary packages
#!/usr/bin/env python
# coding=utf-8
# trigger espeak colophon by covering the camera with a finger
# import dependencies
# sudo pip install PiCamera[array]
# sudo apt-get install alsa-utils (provides aplay; it is not a pip package)
import imutils
from imutils.video import VideoStream
import argparse
import datetime
from io import BytesIO
from PIL import Image
import time, sys
from time import sleep
import cv2
from datetime import datetime
from subprocess import call
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--min-area", type=int, default=5000, help="minimum area size")
args = vars(ap.parse_args())
# using picamera as default
# check open cv version
print(cv2.__version__)
# choose video source from camera pi
vs = VideoStream(usePiCamera=True).start()
# let camera warm up
sleep(2.0)
# initialize the first frame in the video stream
firstFrame = None
occupied = False
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
frame = vs.read()
frame = frame if args.get("video", None) is None else frame[1]
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if frame is None:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0) # kernel dimensions must be odd; an even size like (210, 210) makes OpenCV raise an error
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
image = vs.read()
count = 0
success = True
buffer = []
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
playing = False
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
while success:
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
continue
# save live frame as JPEG file
cv2.imwrite("check_frame.jpg", image)
image = vs.read()
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
if not occupied:
occupied = True
print ('occupied')
sys.exit()
# open the frame
img = Image.open("check_frame.jpg")
# resize the frame
img2 = img.resize((1, 1))
# get the color of pixel
color = img2.getpixel((0, 0))
# print the color of pixel
print('#{:02x}{:02x}{:02x}'.format(*color))
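# e.g. a pixel of (255, 0, 200) prints as '#ff00c8'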
# sum the 3 channels to get an overall brightness value
sum = color[0] + color[1] + color[2]
# this catches not only complete black but also dark shades; raising the sum threshold increases the sensitivity
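# e.g. a very dark pixel like (3, 2, 1) sums to 6, below the threshold of 10, so it still counts as covered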
if sum < 10 :
buffer.append(True)
else :
# append a unique timestamp instead of False, so the buffer can never hold a single repeated value unless every recent frame was dark
buffer.append(datetime.now())
if occupied and text == "Unoccupied":
occupied = False
print ("Unoccupied")
# keep a rolling buffer of results; once it holds more than 300 values, drop the oldest first
if len(buffer) > 300:
buffer.pop(0)
print(buffer)
# show the frame and record if the user presses a key
# cv2.imshow("Security Feed", frame)
cv2.imwrite('normal.jpg', frame)
#cv2.imshow("Thresh", thresh)
cv2.imwrite('threshold.jpg', thresh)
#cv2.imshow("Frame Delta", frameDelta)
cv2.imwrite('Gaussianblur.jpg', frameDelta)
#key = cv2.waitKey(1) & 0xFF
# For calling the colophon.wav some conditions must be met, in this order:
# - all the values in the buffer must be the same
# - the buffer must hold at least 200 values, which prevents triggering on just a handful of frames
# - nothing may already be playing
if ( (len(set(buffer)) == 1) and (len(buffer) > 200) and (playing == False) ):
print ("All elements in list are same")
call(["aplay", "/home/pi/colophon/colophon.wav"])
# to record the espeak sentence into a .wav file ->
# -> espeak "sentence goes here" -ven+whisper -s 150 --stdout > colophon.wav
# espeak 'Iris Version 0.5 Contributors: Gill Baldwin, Simon Browne, Tancredi Di Giovanni, Paloma García, Rita Graça, Artemis Gryllaki, Pedro Sá Couto, Biyi Wen, Bohye Woo, Silvio Lorusso, Aymeric Mansoux, André Castro, Steve Rushton, Michael Murtaugh, Leslie Robbins. Produced and published by the Experimental Publishing (XPUB) program of the Piet Zwart Institute, Rotterdam, December 2018. A collaboration between the Research Department of Het Nieuwe Instituut and XPUB.'
playing = True
# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
#cv2.destroyAllWindows()
else:
print ("All elements in list are not same")

@ -1,9 +1,15 @@
#!/usr/bin/env python
# this script combines the content of guru and pirate (from json files) to play their messages
# it also integrates leds when the characters speak
import json, random, os, subprocess
from pprint import pprint
from time import sleep
from LEDfunctions import *
# when characters speak the LEDs light up and perform effects
def vu_2_leds(color):
while True:
data = play_process.stdout.readline()
@ -11,22 +17,21 @@ def vu_2_leds(color):
pixels.clear() # make LEDs dark
pixels.show()
break
data = data.rstrip()
if data.endswith("%"):
vu = float(data[:-1][-3:])/100 # 0-100
leds_color_intensity(color, vu)
def leds_start_stop(color): # for pirate
while True:
data = play_process.stdout.readline()
data = data.rstrip()
print('data:',data)
print('process:', play_process.stdout.readline()) #_handle_exitstatus
leds_pirate_bounce(color_pirate)
# open the json files, where all the phrases are stored
pwd = os.path.dirname( os.path.realpath(__file__) ) + "/"
oracle_f = open(pwd + "guru.json", "r").read()
oracle = json.loads(oracle_f)
@ -34,7 +39,7 @@ rebel_f = open(pwd + "rebel.json", "r").read()
rebel = json.loads(rebel_f)
# guru part, using audio recordings
keys = list( oracle.keys())
part = random.choice(keys) # choose: Repeat OR Ask Yourself
print(part)
@ -47,32 +52,29 @@ play_process = subprocess.Popen(["aplay", intro_sound, "-f", "cd", "--vumeter=m
vu_2_leds(color_guru)
# pirate reply
rebel_run = random.choice([True, False]) # 50/50 chance of intervening
if rebel_run is True:
rebel_reply_snippet = intro[2] # comes from the guru/oracle json file
rebel_reply = random.choice( rebel[part] )
rebel_sentence = rebel_reply.format(rebel_reply_snippet)
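# hypothetical example (the real strings live in the json files): if rebel_reply
# were "Forget about {}!" and the snippet were "breathing exercises", the result
# would be "Forget about breathing exercises!"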
# play the guru messages (audio recordings)
for i in range( random.randint(1,4) ):
sleep( random.randint(1,3) )
sleep(1) # fixed one-second pause (randint(1,1) always returns 1)
msg = random.choice(oracle[part]['messages'])
msg_txt = msg[0]
msg_sound = sound_dir + msg[1]
print('MSG:', msg_txt, msg_sound)
play_process = subprocess.Popen(["aplay", msg_sound, "-f", "cd", "--vumeter=mono"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
vu_2_leds(color_guru) # LEDs guru
# pirate part, using espeak
if rebel_run is True:
print('rebel sentence:', rebel_sentence)
#rebel_cmd = 'echo "{}" | espeak -ven+whisper -s 150'.format(rebel_sentence)
#os.system(rebel_cmd)
# TO DO
play_process = subprocess.Popen(["espeak", rebel_sentence, "-ven+whisper", "-s", "150"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
leds_pirate_bounce(color_pirate)
# leds_start_stop(color_pirate)
play_process = subprocess.Popen(["espeak", rebel_sentence, "-ven+whisper", "-s", "150"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) # text to speech using espeak
leds_pirate_bounce(color_pirate) # LEDs pirate

@ -1,9 +1,10 @@
#!/bin/sh
# location of the directory
DIR=/var/www/html/lifeHackAgent/
while [ 1 ]
do
python "$DIR"motion_detector_2.py -a 10
python "$DIR"motion_detector_2.py -a 6000
python "$DIR"guru-pirate.py
sleep 10
sleep 5
done

@ -4,7 +4,15 @@
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
# import the necessary packages
# sudo pip install PiCamera[array]
# This script is used to detect motion from the camera connected to the Raspberry Pi
# The LED strip lights up when motion is detected
# Gaussian blur is used in the motion detection; to adjust the sensitivity, change the "minimum area size" argument
import imutils
from imutils.video import VideoStream
import argparse
@ -12,19 +20,42 @@ import datetime
import time, sys
from time import sleep
import cv2
from LEDfunctions import *
# LEDs for motion detection: when motion is detected, the LEDs light up
def vu_2_leds(color):
while True:
data = play_process.stdout.readline()
if not data:
pixels.clear() # make LEDs dark
pixels.show()
break
data = data.rstrip()
if data.endswith("%"):
vu = float(data[:-1][-3:])/100 # 0-100
leds_color_intensity(color, vu)
def leds_start_stop(color): # for pirate
while True:
data = play_process.stdout.readline()
data = data.rstrip()
print('data:',data)
print('process:', play_process.stdout.readline()) #_handle_exitstatus
leds_pirate_bounce(color_pirate)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--min-area", type=int, default=6000, help="minimum area size")
args = vars(ap.parse_args())
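# usage example, matching the value used in motion.sh: python motion_detector_2.py -a 6000
# a larger --min-area makes detection less sensitive (small contours are ignored)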
# we are reading from the pi camera
vs = VideoStream(usePiCamera=True).start()
sleep(2.0)
sleep(0.5)
# initialize the first frame in the video stream
firstFrame = None
occupied = False
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
@ -32,8 +63,7 @@ while True:
frame = vs.read()
frame = frame if args.get("video", None) is None else frame[1]
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end of the video
if frame is None:
break
@ -47,13 +77,11 @@ while True:
firstFrame = gray
continue
# compute the absolute difference between the current frame and the first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
@ -73,21 +101,27 @@ while True:
if not occupied:
occupied = True
print ('occupied')
# the LED activation bounce will be performed
play_process = leds_activation(color_activation)
# break the while-true loop; this allows the motion.sh loop to perform the second part -> guru-pirate.py
sys.exit()
if occupied and text == "Unoccupied":
occupied = False
print ("Unoccupied")
#check images for debugging
# # show the frame and record if the user presses a key
# # cv2.imshow("Security Feed", frame)
# cv2.imwrite('security.jpg', frame)
# cv2.imwrite('RegularCamera.jpg', frame)
# #cv2.imshow("Thresh", thresh)
# cv2.imwrite('threshold.jpg', thresh)
# #cv2.imshow("Frame Delta", frameDelta)
# cv2.imwrite('blured.jpg', frameDelta)
# #key = cv2.waitKey(1) & 0xFF
#
# # if the `q` key is pressed, break from the loop
# #if key == ord("q"):
# # break
