#!/usr/bin/env python
# opencv-find-face : Opencv face tracking with pan/tilt search and lock
# written by Claude Pageau -
# This is a little laggy but does work OK.
# Uses the pipan.py module from the openelectrons.com RPI camera pan/tilt kit
# to control camera tracking.  If you are not using openelectrons.com pan/tilt
# hardware, use your own pan/tilt module and modify the code accordingly.
# The picamera python module must also be installed, as well as opencv.
# To install opencv and python for opencv
# sudo apt-get install libopencv-dev python-opencv
# To install picamera python module
# sudo apt-get install python-picamera
# You will also need to install the python picamera.array module, which includes numpy
# sudo pip install "picamera[array]"
# copy /usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml
# to the same folder that this python script is in.
# Note: the v4l2 driver is not used since the stream is created with the
# picamera module using picamera.array.
# If you have any questions email pageauc@gmail.com
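# Optional: a quick way to confirm the modules are available before running
# this script is to try the imports from a shell, e.g.
#   python -c "import cv2; print cv2.__version__"
#   python -c "import picamera, picamera.array; print 'picamera OK'"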
print "Initializing ...."
import io
import time
import picamera
import picamera.array
import cv2
import numpy as np
# pipan.py is the python module from the openelectrons.com RPI camera pan/tilt kit.
# Copy pipan.py to the same folder as this script.
import pipan
p = pipan.PiPan()
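# If you are not using the openelectrons.com pan/tilt hardware, one option
# (an untested sketch based only on the do_pan()/do_tilt() calls used below)
# is to replace the import and the line above with a stand-in class that
# drives your own servos, e.g.:
#
# class PiPan:
#     def do_pan(self, position):
#         # move your pan servo to 'position' here (same units as the limits below)
#         print "pan to %d" % position
#     def do_tilt(self, position):
#         # move your tilt servo to 'position' here
#         print "tilt to %d" % position
# p = PiPan()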
# To speed things up, lower the resolution of the camera
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
# Show opencv window
show_window = False
# Camera center of image
cam_cx = CAMERA_WIDTH / 2
cam_cy = CAMERA_HEIGHT / 2
inch = 9.0   # constant used for the rough face distance (range) estimate in face_scan
# bounds checking for pan/tilt search.
limit_y_bottom = 90
limit_y_top = 150
limit_y_level = 140
limit_x_left = 60
limit_x_right = 240
# Approx Center of Pan/Tilt motion
pan_x_c = 120
pan_y_c = 130
# Set Initial starting position of pan/tilt
pan_cx = pan_x_c
pan_cy = pan_y_c
# Amount pan/tilt moves when searching
pan_move_x = 30
pan_move_y = 20
# Timer seconds to wait before starting pan/tilt search for face.
# local face search
face_timer1 = 15
# Wide face search
face_timer2 = 30
# Stop pan/tilt and start Motion Detect.
face_timer3 = 45
# Motion scan settings
motion_detected = False
# sensitivity - How much the color value (0-255) needs to change to be considered a change
sensitivity = 25
# threshold - How many pixels must change to be considered motion
threshold = CAMERA_WIDTH * CAMERA_HEIGHT * 2 / 110
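# For reference, at 320x240 the threshold above works out to
# 320 * 240 * 2 / 110 = 1396 (integer division) changed pixels before motion is reported.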
# Move the pan/tilt to a specific location.  pipan has built-in limit checks.
def pan_goto(x, y):
    p.do_pan(int(x))
    p.do_tilt(int(y))
def motion_scan(x, y):
    print "motion_scan - Scan for Motion at cx=%d cy=%d" % (x, y)
    pan_goto(x, y)
    numImages = 0
    step = 1          # use this to toggle where the image gets saved
    captureCount = 0  # flag used to begin a sequence capture
    stream = io.BytesIO()
    with picamera.PiCamera() as camera:
        camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
        time.sleep(2)
        # begin motion scan
        while sensitivity > 0:
            camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
            if step == 1:
                stream.seek(0)
                camera.capture(stream, 'bgra', True)
                data1 = np.fromstring(stream.getvalue(), dtype=np.uint8)
                step = 2
            else:
                stream.seek(0)
                camera.capture(stream, 'bgra', True)
                data2 = np.fromstring(stream.getvalue(), dtype=np.uint8)
                step = 1
            numImages = numImages + 1
            # ignore the first few images because if the camera is not quite
            # ready it will register as motion right away
            if numImages > 4:
                if captureCount <= 0:
                    # not capturing, test for motion (very simplistic, but
                    # works well enough for my purposes)
                    # get difference between 2 successive images
                    data3 = np.abs(data1 - data2)
                    # There are 4 times the number of pixels due to bgra
                    numTriggers = np.count_nonzero(data3 > sensitivity) / 4 / sensitivity
                    if numTriggers > threshold:
                        print "motion_scan - Motion Detected. Threshold=%d Triggers=%d" % (threshold, numTriggers)
                        stream.close()
                        return True
def face_scan(x, y):
    print "face_scan - Start Scan at cx=%d cy=%d" % (x, y)
    pan_parked = False
    # Face detection opencv center of face box
    face_cx = x
    face_cy = y
    # Pan/Tilt motion center point
    pan_cx = x
    pan_cy = y
    # Put pan/tilt in a known good position.
    pan_goto(pan_x_c, pan_y_c)
    face_found = False
    start_time = time.time()
    # Load a cascade file for detecting faces.  It can be loaded from the opencv
    # install at /usr/share/opencv/haarcascades/ (as below) or copied to the
    # same folder as this script and referenced by file name.
    face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml')
    # face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_profileface.xml')
    # Saving the picture to an in-program stream rather than a file
    stream = io.BytesIO()
    with picamera.PiCamera() as camera:
        camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
        camera.vflip = True
        while True:
            with picamera.array.PiRGBArray(camera) as stream:
                camera.capture(stream, format='bgr')
                # At this point the image is available as stream.array
                image = stream.array
                # Convert to grayscale, which is easier
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                # Look for faces over the given image using the loaded cascade file
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x, y, w, h) in faces:
                    face_found = True
                    # print "face_scan - Face Found at x=%d y=%d w=%d h=%d" % (x, y, w, h)
                    pan_parked = False
                    start_time = time.time()
                    # rough range estimate in feet based on the width of the face box
                    distance = ((CAMERA_WIDTH / w) * inch) / 12
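                    # e.g. a face box about 80 px wide in a 320 px frame gives
                    # ((320 / 80) * 9.0) / 12 = 3.0 ft (a rough estimate only)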
                    face_cx = x + w/2
                    Nav_LR = (cam_cx - face_cx) / 5
                    pan_cx = pan_cx - Nav_LR
                    face_cy = y + h/2
                    Nav_UD = (cam_cy - face_cy) / 4
                    pan_cy = pan_cy - Nav_UD
                    # Print Navigation required to center face in image
                    if (abs(Nav_LR) > 5 or abs(Nav_UD) > 3):
                        pan_goto(pan_cx, pan_cy)
                        print "face_scan - Nav LR=%s UD=%s Range=%.1f ft" % (Nav_LR, Nav_UD, distance)
                    if show_window:
                        # Opencv has built in image manipulation functions
                        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
                # Use opencv built in window to show the image
                # Leave out if your Raspberry Pi isn't set up to display windows
                if show_window:
                    cv2.imshow('Face Image', image)
                elapsed_time = time.time() - start_time
                # start pan/tilt search for face if timer runs out
                if elapsed_time > face_timer3:
                    if not pan_parked:
                        pan_parked = True
                        stream.close()
                        # Return and start motion capture
                        return (pan_cx, pan_cy, False)
                elif elapsed_time > face_timer2:
                    face_found = False
                    # print "face_scan - Wide Search Timer2=%d > %s seconds" % (elapsed_time, face_timer2)
                    pan_cx = pan_cx + pan_move_x
                    if pan_cx > limit_x_right:
                        pan_cx = limit_x_left
                        pan_cy = pan_cy + pan_move_y
                        if pan_cy > limit_y_top:
                            pan_cy = limit_y_bottom
                    pan_goto(pan_cx, pan_cy)
                elif elapsed_time > face_timer1:
                    face_found = False
                    # print "face_scan - Local Search Timer1=%d > %s seconds" % (elapsed_time, face_timer1)
                    pan_cx = pan_cx + pan_move_x
                    if (pan_cx > limit_x_right - (pan_move_x * 2)):
                        pan_cx = limit_x_left + pan_move_x
                        pan_cy = pan_cy + pan_move_y
                        if (pan_cy > limit_y_top - pan_move_y):
                            pan_cy = limit_y_bottom + pan_move_y
                    pan_goto(pan_cx, pan_cy)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    quit()
                # return (pan_cx, pan_cy, True)
# ---------------- Start Main Program
motion_detected = False
q_pressed = False
while (not q_pressed):
    if motion_detected:
        (pan_cx, pan_cy, motion_detected) = face_scan(pan_x_c, pan_y_c)
        if motion_detected:
            quit()
            # q_pressed = True
    else:
        motion_detected = motion_scan(pan_x_c, pan_y_c)
# Close Window
print "Exiting Program ...."
cv2.destroyAllWindows()
# Note: the cameras are already closed by the with blocks inside
# motion_scan and face_scan, so no camera.close() is needed here.
quit()