
# Hello World Example
# Welcome to the OpenMV IDE! Click on the gear button above to run the script!

import sensor, image, time


sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
# Other frame sizes:
# sensor.QQVGA: 160x120, sensor.QQVGA2: 128x160 (typically used with the LCD shield)
# sensor.QVGA: 320x240
# sensor.VGA: 640x480
# sensor.QQCIF: 88x72
# sensor.QCIF: 176x144, sensor.CIF: 352x288
sensor.skip_frames(time = 2000) # Wait for settings to take effect.
clock = time.clock() # Create a clock object to track the FPS.

while(True):
    clock.tick() # Update the FPS clock.
    img = sensor.snapshot() # Take a picture and return the image.
    print(clock.fps())

## Delay and Timing

# Importing utime is required.
import utime

utime.sleep(1) # delay 1 s
utime.sleep_ms(500) # delay 500 ms
utime.sleep_us(10) # delay 10 us

# Use the following two lines together to measure how long a piece of code takes to run.
start = utime.ticks_ms() # record the start time
# ... code under test ...
delta = utime.ticks_diff(utime.ticks_ms(), start) # elapsed time of the code under test
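
Putting the two halves together, a minimal sketch of the timing pattern above (the summing loop is just a stand-in workload):

import utime

start = utime.ticks_ms() # record the start time
total = 0
for i in range(100000): # stand-in workload; replace with the code under test
    total += i
delta = utime.ticks_diff(utime.ticks_ms(), start) # elapsed milliseconds
print('loop took %d ms (total=%d)' % (delta, total))
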
Pin.AF_OD initialize the pin to alternate-function mode with an open-drain drive
Pin.AF_PP initialize the pin to alternate-function mode with a push-pull drive
Pin.ANALOG initialize the pin to analog mode
Pin.IN initialize the pin to input mode
Pin.OUT_OD initialize the pin to output mode with an open-drain drive
Pin.OUT_PP initialize the pin to output mode with a push-pull drive
Pin.PULL_DOWN enable the pull-down resistor on the pin
Pin.PULL_NONE don't enable any pull-up or pull-down resistors on the pin
Pin.PULL_UP enable the pull-up resistor on the pin
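
As a quick illustration of these constants, a minimal sketch (the pin names follow the OpenMV 'P0'..'P2' convention used below; the wiring itself is hypothetical):

from pyb import Pin

btn = Pin('P0', Pin.IN, Pin.PULL_UP) # input with the internal pull-up enabled
led = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE) # push-pull output, no pull resistors
bus = Pin('P2', Pin.OUT_OD, Pin.PULL_UP) # open-drain output, e.g. for a shared line

bus.high() # releases the line; the pull-up takes it high
bus.low() # actively drives the line low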

# Pin Control Example
#
# This example shows how to use the I/O pins in GPIO mode on your OpenMV Cam.

from pyb import Pin

# Connect a switch to pin 0 that will pull it low when the switch is closed.
# Pin 1 will then light up.
pin0 = Pin('P0', Pin.IN, Pin.PULL_UP)
pin1 = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE)

while(True):
    pin1.value(not pin0.value())

Verified correct with an oscilloscope.

import utime
from pyb import Pin

pin0 = Pin('P0', Pin.OUT_PP, Pin.PULL_NONE)
pin1 = Pin('P1', Pin.OUT_PP, Pin.PULL_NONE)
pin2 = Pin('P2', Pin.OUT_PP, Pin.PULL_NONE)
pin3 = Pin('P3', Pin.OUT_PP, Pin.PULL_NONE)
pin4 = Pin('P4', Pin.OUT_PP, Pin.PULL_NONE)
pin5 = Pin('P5', Pin.OUT_PP, Pin.PULL_NONE)
pin6 = Pin('P6', Pin.OUT_PP, Pin.PULL_NONE)
pin7 = Pin('P7', Pin.OUT_PP, Pin.PULL_NONE)
pin8 = Pin('P8', Pin.OUT_PP, Pin.PULL_NONE)

while(True):
    pin0.high()
    pin1.high()
    pin2.high()
    pin3.high()
    pin4.high()
    pin5.high()
    pin6.high()
    pin7.high()
    pin8.high()
    utime.sleep_ms(50)

    pin0.low()
    pin1.low()
    pin2.low()
    pin3.low()
    pin4.low()
    pin5.low()
    pin6.low()
    pin7.low()
    pin8.low()
    utime.sleep_ms(50)

## General board control

See the pyb module documentation.

import pyb

pyb.repl_uart(pyb.UART(3, 9600, timeout_char=1000)) # duplicate REPL on UART(3)


pyb.wfi() # pause CPU, waiting for interrupt
pyb.stop() # stop CPU, waiting for external interrupt
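
A minimal sketch of how pyb.wfi() is typically used: idle the CPU between timer interrupts instead of busy-waiting (the 10 Hz timer and the tick counter here are assumptions for illustration):

import pyb
from pyb import Timer

ticks = 0
def on_tick(timer): # runs in the timer interrupt
    global ticks
    ticks += 1

tim = Timer(4, freq=10) # 10 Hz timer
tim.callback(on_tick)

while (True):
    pyb.wfi() # sleep the CPU until the next interrupt
    if ticks % 10 == 0: # roughly once per second
        print('ticks:', ticks)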

ADC
See pyb.Pin and pyb.ADC.
Notes:
1. P6 (PA5) is the ADC pin.
2. The maximum input voltage in this mode is 3.3V.

import time
from pyb import ADC

adc = ADC('P6') # Must always be 'P6'.

while(True):
    # The ADC has 12 bits of resolution for 4096 values.
    adcValue = (adc.read() * 3.3) / 4095
    print('ADC = %fv' % adcValue)
    time.sleep(100)

DAC
See pyb.Pin and pyb.DAC.

import time
from pyb import DAC

dac = DAC('P6') # Must always be 'P6'.

while(True):
    # The DAC has 8-bit or 12-bit resolution (8-bit by default, i.e. 256 levels).
    for i in range(256):
        dac.write(i)
        time.sleep(20)

    for i in range(256):
        dac.write(255 - i)
        time.sleep(20)
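
The example above uses the default 8-bit resolution. A minimal sketch of 12-bit mode, assuming the bits argument of pyb.DAC (write() then takes values 0..4095):

import time
from pyb import DAC

dac = DAC('P6', bits=12) # 12-bit resolution: 4096 levels

while(True):
    for i in range(4096):
        dac.write(i) # ramp the output from 0V up to ~3.3V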

PWM

Timer 1 Channel 3 Negative -> P0 (PB15)
Timer 1 Channel 2 Negative -> P1 (PB14)
Timer 1 Channel 1 Negative -> P2 (PB13)
Timer 2 Channel 3 Positive -> P4 (PB10)
Timer 2 Channel 4 Positive -> P5 (PB11)
Timer 2 Channel 1 Positive -> P6 (PA5)
Timer 4 Channel 1 Negative -> P7 (PD12)
Timer 4 Channel 2 Negative -> P8 (PD13)
Timer 4 Channel 3 Positive -> P9 (PD14) (OpenMV Cam M7 Only)

import time
from pyb import Pin, Timer

tim = Timer(4, freq=1000) # Frequency in Hz


# Generate a 1KHz square wave on TIM4 with 50% and 75% duty cycles on
# channels 1 and 2, respectively.
ch1 = tim.channel(1, Timer.PWM, pin=Pin('P7'), pulse_width_percent=50)
ch2 = tim.channel(2, Timer.PWM, pin=Pin('P8'), pulse_width_percent=75)

while (True):
    time.sleep(1000)

UART

UART 3 RX -> P5 (PB11)
UART 3 TX -> P4 (PB10)
UART 1 RX -> P0 (PB15) (OpenMV Cam M7 Only)
UART 1 TX -> P1 (PB14) (OpenMV Cam M7 Only)

uart.read(5) # read 5 characters
uart.read() # read all available characters

uart.readline() # read one line
uart.write('abc') # write the string 'abc' to the serial port

uart.readchar() # read a single character
uart.writechar(48) # write the character with ASCII code 48

uart.any() # check whether data is waiting; returns the number of bytes
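
Tying the calls above together, a minimal echo sketch (UART 3 at 19200 baud, matching the examples below):

from pyb import UART

uart = UART(3, 19200, timeout_char = 1000)

while(True):
    if uart.any(): # bytes waiting?
        line = uart.readline() # read one line (or whatever is available)
        if line:
            uart.write('echo:')
            uart.write(line)
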
#
# This example shows how to use the serial port on your OpenMV Cam. Attach pin
# P4 to the serial input of a serial LCD screen to see 'Hello World!' printed
# on the serial LCD display.

import time
from pyb import UART

# Always pass UART 3 for the UART number for your OpenMV Cam.
# The second argument is the UART baud rate. For a more advanced UART control
# example see the BLE-Shield driver.
uart = UART(3, 19200, timeout_char = 1000)
testNum=1

while(True):
    uart.write('Hello World!\r')
    uart.write('send:')
    uart.write('%d\n' % testNum)
    time.sleep(1000)

UART
import time
from pyb import UART

# Always pass UART 3 for the UART number for your OpenMV Cam.
# The second argument is the UART baud rate. For a more advanced UART control
# example see the BLE-Shield driver.
uart = UART(3, 19200, timeout_char = 1000)

while(True):
    if uart.any():
        tmp_data = uart.readline()
        uart.write('receive:%s' % tmp_data)
        print(tmp_data)

SPI
# SPI Control
#
# This example shows how to use the SPI bus on your OpenMV Cam to directly
# control the LCD shield without using the built-in lcd shield driver. You will
# need the LCD shield to run this example.

import sensor, image, time
from pyb import Pin, SPI

cs = Pin('P3', Pin.OUT_OD)
rst = Pin('P7', Pin.OUT_PP)
rs = Pin('P8', Pin.OUT_PP)
# The hardware SPI bus for your OpenMV Cam is always SPI bus 2.
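# Note: int(1000000000/66) is roughly 15.15 MHz, i.e. a 66 ns clock period.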
spi = SPI(2, SPI.MASTER, baudrate=int(1000000000/66), polarity=0, phase=0)

def write_command_byte(c):
    cs.low()
    rs.low()
    spi.send(c)
    cs.high()

def write_data_byte(c):
    cs.low()
    rs.high()
    spi.send(c)
    cs.high()

def write_command(c, *data):
    write_command_byte(c)
    if data:
        for d in data: write_data_byte(d)

def write_image(img):
    cs.low()
    rs.high()
    spi.send(img)
    cs.high()

# Reset the LCD.
rst.low()
time.sleep(100)
rst.high()
time.sleep(100)

write_command(0x11) # Sleep Exit
time.sleep(120)

# Memory Data Access Control
write_command(0x36, 0xC0)

# Interface Pixel Format
write_command(0x3A, 0x05)

# Display On
write_command(0x29)

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # must be this
sensor.set_framesize(sensor.QQVGA2) # must be this
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    write_command(0x2C) # Write image command...
    write_image(img)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

# LCD Example
#
# Note: To run this example you will need a LCD Shield for your OpenMV Cam.
#
# The LCD Shield allows you to view your OpenMV Cam's frame buffer on the go.

import sensor, image, lcd

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA2) # Special 128x160 framesize for LCD Shield.
lcd.init() # Initialize the lcd screen.

while(True):
    lcd.display(sensor.snapshot()) # Take a picture and display the image.

Timer

# Timer Control Example
#
# This example shows how to use a timer for callbacks.

import time
from pyb import Pin, Timer

def tick(timer): # we will receive the timer object when being called
    print('Timer callback')

tim = Timer(4, freq=1) # create a timer object using timer 4 - trigger at 1Hz
tim.callback(tick) # set the callback to our tick function

while (True):
    time.sleep(1000)

# Servo Control Example
#
# This example shows how to use your OpenMV Cam to control servos.

import time
from pyb import Servo

s1 = Servo(1) # P7
s2 = Servo(2) # P8

while(True):
    for i in range(1000):
        s1.pulse_width(1000 + i)
        s2.pulse_width(1999 - i)
        time.sleep(10)
    for i in range(1000):
        s1.pulse_width(1999 - i)
        s2.pulse_width(1000 + i)
        time.sleep(10)
## RTC Clock

# RTC Example
#
# This example shows how to use the RTC.
import time
from pyb import RTC
rtc = RTC()
rtc.datetime((2018, 6, 1, 9, 0, 0, 0, 0))

while (True):
    print(rtc.datetime())
    time.sleep(1000)


# Color Drawing Example
#
# This example shows off your OpenMV Cam's built-in drawing capabilities. This
# example was originally a test but serves as good reference code. Please put
# your IDE into non-JPEG mode to see the best drawing quality.

import sensor, image, time

sensor.reset()
sensor.set_framesize(sensor.QVGA)

# All drawing functions use the same code to pass color,
# so we just need to test one function.

while(True):

    # Test Draw Line (GRAYSCALE)
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for i in range(img.width()):
            c = ((i * 255) + (img.width()/2)) / img.width()
            img.draw_line([i, 0, i, img.height()-1], color = int(c))
        sensor.snapshot()
    time.sleep(1000)

    # Test Draw Line (RGB565 - red)
    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for i in range(img.width()):
            c = ((i * 255) + (img.width()/2)) / img.width()
            img.draw_line([i, 0, i, img.height()-1], color = [int(c), 0, 0])
        sensor.snapshot()
    time.sleep(1000)

    # Test Draw Line (RGB565 - green)
    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for i in range(img.width()):
            c = ((i * 255) + (img.width()/2)) / img.width()
            img.draw_line([i, 0, i, img.height()-1], color = [0, int(c), 0])
        sensor.snapshot()
    time.sleep(1000)

    # Test Draw Line (RGB565 - blue)
    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for i in range(img.width()):
            c = ((i * 255) + (img.width()/2)) / img.width()
            img.draw_line([i, 0, i, img.height()-1], color = [0, 0, int(c)])
        sensor.snapshot()
    time.sleep(1000)

Framebuffer
import sensor, image, time

sensor.reset()

sensor.set_contrast(1)
sensor.set_gainceiling(16)

sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
img = image.Image('/example.bmp', copy_to_fb=True)
# img.draw_line(...)

# Flush FB
sensor.flush()

# Add a small delay to allow the IDE to read the flushed image.
time.sleep(100)

import pyb, sensor, image, math

sensor.reset()
sensor.set_framesize(sensor.QVGA)

while(True):
    # Test Set Pixel
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.set_pixel(x, y, 255) # Set pixel (x, y) to 255, i.e. white.
            # Grayscale values range from 0 to 255.

    sensor.set_pixformat(sensor.RGB565) # color image
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.set_pixel(x, y, (255, 255, 255))
            # Set pixel (x, y) to (255, 255, 255), i.e. white. The channels of
            # an RGB image are red, green, and blue, each ranging from 0 to 255.

    # Test Draw Line
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
            x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_line([x0, y0, x1, y1])
            # draw_line((x0, y0, x1, y1), color=White) draws a line from
            # (x0, y0) to (x1, y1). Note that [x0, y0, x1, y1] is a single
            # tuple; don't forget the enclosing [ ] or ( ).
            # For grayscale images color is 0-255; for RGB images color is a
            # tuple such as [255, 255, 255] - don't forget the brackets. The
            # default color is 255 or [255, 255, 255] (white).

    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
            x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_line([x0, y0, x1, y1])

    # Test Draw Rectangle
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            w = (pyb.rng() % img.width())
            h = (pyb.rng() % img.height())
            img.draw_rectangle([x, y, w, h])

    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            w = (pyb.rng() % img.width())
            h = (pyb.rng() % img.height())
            img.draw_rectangle([x, y, w, h])

    # Test Draw Circle
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            r = (pyb.rng() % (img.width() if (img.width() > img.height()) else img.height()))
            img.draw_circle(x, y, r)
            # draw_circle(x, y, radius, color=White) draws a circle of radius
            # r centered at (x, y). For grayscale images color is 0-255; for
            # RGB images color is a tuple such as [255, 255, 255] - don't
            # forget the brackets. The default color is 255 or
            # [255, 255, 255] (white).

    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            r = (pyb.rng() % (img.width() if (img.width() > img.height()) else img.height()))
            img.draw_circle(x, y, r)

    # Test Draw String (print text on the image)
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_string(x, y, 'Hello\nWorld!')
            # draw_string(x, y, text, color=White) prints the string text on
            # the image starting at (x, y). Note that text must be a quoted
            # string; \n means newline and \r means carriage return. The
            # default character size is 8x10. For grayscale images color is
            # 0-255; for RGB images color is a tuple such as [255, 255, 255].
            # The default color is 255 or [255, 255, 255] (white).

    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_string(x, y, 'Hello\nWorld!')

    # Test Draw Cross
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_cross(x, y)
            # draw_cross(x, y, size=5, color=White) draws a cross centered
            # at (x, y).

    sensor.set_pixformat(sensor.RGB565)
    for i in range(10):
        img = sensor.snapshot()
        for j in range(100):
            x = (pyb.rng() % (2*img.width())) - (img.width()//2)
            y = (pyb.rng() % (2*img.height())) - (img.height()//2)
            img.draw_cross(x, y)

WIFI

# Scan Example
#
# This example shows how to scan for networks with the WiFi shield.

import time, network

wlan = network.WINC()
print('\nFirmware version:', wlan.fw_version())

while (True):
    scan_result = wlan.scan()
    for ap in scan_result:
        print('Channel:%d RSSI:%d Auth:%d BSSID:%s SSID:%s'%(ap))
    print()
    time.sleep(1000)

# MJPEG Streaming AP.
#
# This example shows off how to do MJPEG streaming in AccessPoint mode.
# Chrome, Firefox and MJpegViewer App on Android have been tested.
# Connect to OPENMV_AP and use this URL: http://192.168.1.1:8080 to view the stream.

import sensor, image, time, network, usocket, sys

SSID = 'OPENMV_AP' # Network SSID
KEY = '1234567890' # Network key (must be 10 chars)
HOST = '' # Use first available interface
PORT = 8080 # Arbitrary non-privileged port

# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)

sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Init wlan module in AP mode.
wlan = network.WINC(mode=network.WINC.MODE_AP)
wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2)

# You can block waiting for client to connect
#print(wlan.wait_for_sta(10000))

def start_streaming(s):
    print('Waiting for connections..')
    client, addr = s.accept()
    # set client socket timeout to 2s
    client.settimeout(2.0)
    print('Connected to ' + addr[0] + ':' + str(addr[1]))

    # Read request from client
    data = client.recv(1024)
    # Should parse client request here

    # Send multipart header
    client.send('HTTP/1.1 200 OK\r\n' \
                'Server: OpenMV\r\n' \
                'Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n' \
                'Cache-Control: no-cache\r\n' \
                'Pragma: no-cache\r\n\r\n')

    # FPS clock
    clock = time.clock()

    # Start streaming images
    # NOTE: Disable IDE preview to increase streaming FPS.
    while (True):
        clock.tick() # Track elapsed milliseconds between snapshots().
        frame = sensor.snapshot()
        cframe = frame.compressed(quality=35)
        header = '\r\n--openmv\r\n' \
                 'Content-Type: image/jpeg\r\n' \
                 'Content-Length:' + str(cframe.size()) + '\r\n\r\n'
        client.send(header)
        client.send(cframe)
        print(clock.fps())

while (True):
    # Create server socket
    s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    try:
        # Bind and listen
        s.bind([HOST, PORT])
        s.listen(5)

        # Set server socket timeout
        # NOTE: Due to a WINC FW bug, the server socket must be closed and reopened if
        # the client disconnects. Use a timeout here to close and re-create the socket.
        s.settimeout(3)
        start_streaming(s)
    except OSError as e:
        s.close()
        print('socket error: ', e)
        #sys.print_exception(e)

Wifi
import sensor, image, time, network, usocket, sys, json

SSID = 'OPENMV_AP' # Network SSID
KEY = '1234567890' # Network key (must be 10 chars)
HOST = '' # Use first available interface
PORT = 8080 # Arbitrary non-privileged port

green_threshold = ( 0, 80, -70, -10, -0, 30)

# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)

sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Init wlan module in AP mode.
wlan = network.WINC(mode=network.WINC.MODE_AP)
wlan.start_ap(SSID, key=KEY, security=wlan.WEP, channel=2)

# You can block waiting for client to connect
#print(wlan.wait_for_sta(10000))

def response(s):
    print('Waiting for connections..')
    client, addr = s.accept()
    # set client socket timeout to 2s
    client.settimeout(2.0)
    print('Connected to ' + addr[0] + ':' + str(addr[1]))

    # Read request from client
    data = client.recv(1024)
    # Should parse client request here

    # Send JSON response header
    client.send('HTTP/1.1 200 OK\r\n' \
                'Server: OpenMV\r\n' \
                'Content-Type: application/json\r\n' \
                'Cache-Control: no-cache\r\n' \
                'Pragma: no-cache\r\n\r\n')

    # FPS clock
    clock = time.clock()

    # Take one snapshot, find green blobs, and send them to the client as JSON.
    # NOTE: Disable IDE preview to increase FPS.
    img = sensor.snapshot()
    blobs = img.find_blobs([green_threshold])
    if blobs:
        for b in blobs:
            img.draw_rectangle(b[0:4]) # rect
            img.draw_cross(b[5], b[6]) # cx, cy

    client.send(json.dumps(blobs))
    client.close()

while (True):
    # Create server socket
    s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    try:
        # Bind and listen
        s.bind([HOST, PORT])
        s.listen(5)

        # Set server socket timeout
        # NOTE: Due to a WINC FW bug, the server socket must be closed and reopened if
        # the client disconnects. Use a timeout here to close and re-create the socket.
        s.settimeout(3)
        response(s)
    except OSError as e:
        s.close()
        print('socket error: ', e)
        #sys.print_exception(e)

Image_Filters advanced_frame_differencing
# Advanced Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. This
# example is advanced because it performs a background update to deal with the
# background image changing over time.

import sensor, image, pyb, os, time

BG_UPDATE_FRAMES = 50 # How many frames before blending.
BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not 'temp' in os.listdir(): os.mkdir('temp') # Make a temp directory

print('About to save background image...')
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save('temp/bg.bmp')
print('Saved background image - Now frame differencing!')

frame_count = 0
while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    frame_count += 1
    if frame_count > BG_UPDATE_FRAMES:
        frame_count = 0
        # Blend in new frame. We're doing 256-alpha here because we want to
        # blend the new frame into the background. Not the background into the
        # new frame which would be just alpha. Blend replaces each pixel by
        # ((NEW*(alpha))+(OLD*(256-alpha)))/256. So, a low alpha results in
        # low blending of the new image while a high alpha results in high
        # blending of the new image. We need to reverse that for this update.
        img.blend('temp/bg.bmp', alpha=(256-BG_UPDATE_BLEND))
        img.save('temp/bg.bmp')

    # Replace the image with the 'abs(NEW-OLD)' frame difference.
    img.difference('temp/bg.bmp')

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Basic_frame_differencing

# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not 'temp' in os.listdir(): os.mkdir('temp') # Make a temp directory

print('About to save background image...')
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save('temp/bg.bmp')
print('Saved background image - Now frame differencing!')

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Replace the image with the 'abs(NEW-OLD)' frame difference.
    img.difference('temp/bg.bmp')

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Color_binary_filter

# Color Binary Filter Example
#
# This script shows off the binary image filter. This script was originally a
# test script... but, it can be useful for showing how to use binary.

import pyb, sensor, image, math

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)

red_threshold = (0,100, 0,127, 0,127) # L A B
green_threshold = (0,100, -128,0, 0,127) # L A B
blue_threshold = (0,100, -128,127, -128,0) # L A B

while(True):
    # Test red threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([red_threshold])
    # Test green threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([green_threshold])
    # Test blue threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([blue_threshold])
    # Test not red threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([red_threshold], invert = 1)
    # Test not green threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([green_threshold], invert = 1)
    # Test not blue threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([blue_threshold], invert = 1)

Edge_detection
# Edge Detection Example:
#
# This example demonstrates using the morph function on an image to do edge
# detection and then thresholding and filtering that image afterwards.

import sensor, image, time

kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
          -1, +8, -1,\
          -1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(100, 255)] # grayscale thresholds

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
    sensor.__write_reg(0xAC, 0xDF)
    sensor.__write_reg(0x8F, 0xFF)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    img.morph(kernel_size, kernel)
    img.binary(thresholds)

    # Erode pixels with less than 2 neighbors using a 3x3 image kernel
    img.erode(1, threshold = 2)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Erode_and_dilate

# Erode and Dilate Example
#
# This example shows off the erode and dilate functions which you can run on
# a binary image to remove noise. This example was originally a test but it's
# useful for showing off how these functions work.

import pyb, sensor, image

sensor.reset()
sensor.set_framesize(sensor.QVGA)

grayscale_thres = (170, 255)
rgb565_thres = (70, 100, -128, 127, -128, 127)

while(True):
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(20):
        img = sensor.snapshot()
        img.binary([grayscale_thres])
        img.erode(2)
    for i in range(20):
        img = sensor.snapshot()
        img.binary([grayscale_thres])
        img.dilate(2)

    sensor.set_pixformat(sensor.RGB565)
    for i in range(20):
        img = sensor.snapshot()
        img.binary([rgb565_thres])
        img.erode(2)
    for i in range(20):
        img = sensor.snapshot()
        img.binary([rgb565_thres])
        img.dilate(2)

Grayscale_binary_filter

# Grayscale Binary Filter Example
#
# This script shows off the binary image filter. This script was originally a
# test script... but, it can be useful for showing how to use binary.

import pyb, sensor, image, math

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

low_threshold = (0, 50)
high_threshold = (205, 255)

while(True):
    # Test low threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([low_threshold])
    # Test high threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([high_threshold])
    # Test not low threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([low_threshold], invert = 1)
    # Test not high threshold
    for i in range(100):
        img = sensor.snapshot()
        img.binary([high_threshold], invert = 1)

Lens_correction

# Lens Correction
#
# This example shows off how to use the lens correction method to fix lens
# distortion in an image. You need to do this for qrcode / barcode / data matrix
# detection. Increase the strength below until lines are straight in the view.
# Zoom in (higher) or out (lower) until you see enough of the image.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(strength = 1.8, zoom = 1.0)

    print(clock.fps())

Line_filter
# Line Filter Example
#
# The sensor module can perform some basic image processing during the image readout without
# additional overhead. This example shows off how to apply some basic line filters in Python.
#
# WARNING - This feature does not work fast enough on the M4 when line pre-processing is implemented
# in Python. In the future this might be fixed somehow; for now you'll see a partial framebuffer.

import sensor, image, time

# Initialize the camera sensor.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.set_framesize(sensor.QQVGA)
clock = time.clock() # Tracks FPS.

# Copy source to destination.
# Note: source is YUYV, destination is 1BPP Grayscale
def line_filter_copy(src, dst):
    for i in range(0, len(dst), 1):
        dst[i] = src[i<<1]

# Segment the image by the following thresholds.
# Note: source is YUYV, destination is 1BPP Grayscale
def line_filter_bw(src, dst):
    for i in range(0, len(dst), 1):
        if (src[i<<1] > 200 and src[i<<1] < 255):
            dst[i] = 0xFF
        else:
            dst[i] = 0x00

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    lines = 0
    img = sensor.snapshot(line_filter = line_filter_copy) # Take a picture and return the image.
    #print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Mean_filter
# Mean Filter Example
#
# This example shows off mean filtering. Mean filtering is your standard average
# filter in a NxN neighborhood. Mean filtering removes noise in the image by
# blurring everything. But, it's the fastest kernel filter operation.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
    # shouldn't ever need to use a value bigger than 2.
    img.mean(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Median_filter
# Median Filter Example
#
# This example shows off median filtering. Median filtering replaces every pixel
# with the median value of its NxN neighborhood. Median filtering is good for
# removing noise in the image while preserving edges.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # The first argument to the median filter is the kernel size, it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second
    # argument 'percentile' is the percentile number to choose from the NxN
    # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75
    # would be the upper quartile.
    img.median(1, percentile=0.5)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Midpoint_filter
# Midpoint Filter Example
#
# This example shows off midpoint filtering. Midpoint filtering replaces each
# pixel by the average of the min and max pixel values for a NxN neighborhood.

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
    # shouldn't ever need to use a value bigger than 2. The 'bias' argument
    # lets you select between min and max blending. 0.5 == midpoint filter,
    # 0.0 == min filter, and 1.0 == max filter. Note that the min filter
    # makes images darker while the max filter makes images lighter.
    img.midpoint(1, bias=0.5)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Mode_filter
# Mode Filter Example
#
# This example shows off mode filtering. Mode filtering is a highly non-linear
# operation which replaces each pixel with the mode of the NxN neighborhood
# of pixels around it. Avoid using the mode filter on RGB565 images. It will
# cause artifacts on image edges...

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # The only argument to the mode filter is the kernel size, it can be
    # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively.
    img.mode(1)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Rotation_correction
# Rotation Correction
#
# This example shows off how to use the rotation_corr() to undo perspective rotations
# in 3 dimensions along with zooming in and out on the image. While this demo rotates
# the image around for fun you can use this feature to fix perspective issues related
# to how your OpenMV Cam is mounted.

import sensor, image, time

# Degrees per frame to rotate by...
X_ROTATION_DEGREE_RATE = 5
Y_ROTATION_DEGREE_RATE = 0.5
Z_ROTATION_DEGREE_RATE = 0
X_OFFSET = 0
Y_OFFSET = 0

ZOOM_AMOUNT = 1 # Lower zooms out - Higher zooms in

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

x_rotation_counter = 0
y_rotation_counter = 0
z_rotation_counter = 0

while(True):
    clock.tick()

    img = sensor.snapshot().rotation_corr(x_rotation = x_rotation_counter, \
                                          y_rotation = y_rotation_counter, \
                                          z_rotation = z_rotation_counter, \
                                          x_translation = X_OFFSET, \
                                          y_translation = Y_OFFSET, \
                                          zoom = ZOOM_AMOUNT)

    x_rotation_counter += X_ROTATION_DEGREE_RATE
    y_rotation_counter += Y_ROTATION_DEGREE_RATE
    z_rotation_counter += Z_ROTATION_DEGREE_RATE

    print(clock.fps())

Sharpen_filter

# Sharpen Filter Example:
#
# This example demonstrates using morph to sharpen images.

import sensor, image, time

kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
          -1, +9, -1,\
          -1, -1, -1]
# This is a sharpen filter kernel.

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.morph(kernel_size, kernel)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.

Snapshot

# Emboss Snapshot Example
#
# Note: You will need an SD card (inserted and working) to run this example.
#
# You can use your OpenMV Cam to save modified image files.

import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Skip frames to let the image stabilize.

pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # LED blinking; this wait can be ignored.

pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()

print("You're on camera!")
img = sensor.snapshot()

img.morph(1, [+2, +1, +0,\
              +1, +1, -1,\
              +0, -1, -2]) # Emboss the original image.

img.save('example.jpg') # or 'example.bmp' (or others)

pyb.LED(BLUE_LED_PIN).off()
print('Done! Reset the camera to see the saved image.')

Snapshot
# Snapshot Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to save image files.

import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user time to get ready.

pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()

print("You're on camera!")
sensor.snapshot().save('example.jpg') # or 'example.bmp' (or others)

pyb.LED(BLUE_LED_PIN).off()
print('Done! Reset the camera to see the saved image.')

Snapshot_on_face_detection

#
# This example demonstrates using face tracking on your OpenMV Cam to take a
# picture.

import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

face_cascade = image.HaarCascade('frontalface', stages=25)

while(True):
    pyb.LED(RED_LED_PIN).on()
    print('About to start detecting faces...')
    sensor.skip_frames(time = 2000) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    print('Now detecting faces!')
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    pyb.LED(BLUE_LED_PIN).off()
    print('Face detected! Saving image...')
    sensor.snapshot().save('snapshot-%d.jpg' % pyb.rng()) # Save Pic.

Snapshot_on_movement

import sensor, image, pyb, os

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.

if not 'temp' in os.listdir(): os.mkdir('temp') # Make a temp directory

while(True):
    pyb.LED(RED_LED_PIN).on()
    print('About to save background image...')
    sensor.skip_frames(time = 2000) # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    sensor.snapshot().save('temp/bg.bmp')
    print('Saved background image - Now detecting motion!')
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10 # We'll say we detected motion after 10 frames of motion.
    while(diff):
        img = sensor.snapshot()
        img.difference('temp/bg.bmp')
        stats = img.statistics()
        # Stats 5 is the max of the lighting color channel. The below code
        # triggers when the lighting max for the whole image goes above 20.
        # The lighting difference maximum should be zero normally.
        if (stats[5] > 20):
            diff -= 1

    pyb.LED(BLUE_LED_PIN).off()
    print('Movement detected! Saving image...')
    sensor.snapshot().save('temp/snapshot-%d.jpg' % pyb.rng()) # Save Pic.

Video-Recording
# GIF Video Recording Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to record gif files. You can either feed the
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
# like GIMP to compress and optimize the Gif before uploading it to the web.

import sensor, image, time, gif, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)

sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user time to get ready.

pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()

g = gif.Gif('example.gif', loop=True)

print("You're on camera!")
for i in range(100):
    clock.tick()
    g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
    print(clock.fps())

g.close()
pyb.LED(BLUE_LED_PIN).off()
print('Done! Reset the camera to see the saved recording.')

Face-Detection
# GIF Video Recording on Face Detection Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to record gif files. You can either feed the
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
# like GIMP to compress and optimize the Gif before uploading it to the web.
#
# This example demonstrates using face tracking on your OpenMV Cam to take a
# gif.

import sensor, image, time, gif, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.HQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

# Load up a face detection HaarCascade. This is an object that your OpenMV Cam
# can use to detect faces using the find_features() method below. Your OpenMV
# Cam has a frontalface HaarCascade built-in. By default, all the stages of the
# HaarCascade are loaded. However, you can adjust the number of stages to speed
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
face_cascade = image.HaarCascade('frontalface', stages=25)
while(True):
    pyb.LED(RED_LED_PIN).on()
    print('About to start detecting faces...')
    sensor.skip_frames(time = 2000) # Give the user time to get ready.
    pyb.LED(RED_LED_PIN).off()
    print('Now detecting faces!')
    pyb.LED(BLUE_LED_PIN).on()
    diff = 10 # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    g = gif.Gif('example-%d.gif' % pyb.rng(), loop=True)
    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(100):
        clock.tick()
        # clock.avg() returns the milliseconds between frames - gif delay is in
        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
        print(clock.fps())

    g.close()

    pyb.LED(BLUE_LED_PIN).off()
    print('Restarting...')

Gif_on_movement

# GIF Video Recording on Movement Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to record gif files. You can either feed the
# recorder object RGB565 frames or Grayscale frames. Use photo editing software
# like GIMP to compress and optimize the Gif before uploading it to the web.
#
# This example demonstrates using frame differencing with your OpenMV Cam to do
# motion detection. After motion is detected your OpenMV Cam will take video.

import sensor, image, time, gif, pyb, os

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.

if not 'temp' in os.listdir(): os.mkdir('temp') # Make a temp directory

while(True):
    pyb.LED(RED_LED_PIN).on()
    print('About to save background image...')
    sensor.skip_frames(time = 2000) # Give the user time to get ready.
    pyb.LED(RED_LED_PIN).off()
    sensor.snapshot().save('temp/bg.bmp')
    print('Saved background image - Now detecting motion!')
    pyb.LED(BLUE_LED_PIN).on()
    diff = 10 # We'll say we detected motion after 10 frames of motion.
    while(diff):
        img = sensor.snapshot()
        img.difference('temp/bg.bmp')
        stats = img.statistics()
        # Stats 5 is the max of the lighting color channel. The below code
        # triggers when the lighting max for the whole image goes above 20.
        # The lighting difference maximum should be zero normally.
        if (stats[5] > 20):
            diff -= 1

    g = gif.Gif('example-%d.gif' % pyb.rng(), loop=True)

    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(100):
        clock.tick()
        # clock.avg() returns the milliseconds between frames - gif delay is in
        g.add_frame(sensor.snapshot(), delay=int(clock.avg()/10)) # centiseconds.
        print(clock.fps())

    g.close()
    pyb.LED(BLUE_LED_PIN).off()
    print('Restarting...')

Image_reader

# Image Reader Example
#
# USE THIS EXAMPLE WITH A uSD CARD!
#
# This example shows how to use the Image Reader object to replay snapshots of what your
# OpenMV Cam saw saved by the Image Writer object for testing machine vision algorithms.

import sensor, image, time

snapshot_source = False # Set to true once finished to pull data from sensor.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)

sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

img_reader = None if snapshot_source else image.ImageReader('/stream.bin')

while(True):
    clock.tick()
    img = sensor.snapshot() if snapshot_source else img_reader.next_frame(copy_to_fb=True, loop=True)
    # Do machine vision algorithms on the image here.

    print(clock.fps())

Image_writer
# Image Writer Example
#
# USE THIS EXAMPLE WITH A uSD CARD! Reset the camera after recording to see the file.
#
# This example shows how to use the Image Writer object to record snapshots of what your
# OpenMV Cam sees for later analysis using the Image Reader object. Images written to disk
# by the Image Writer object are stored in a simple file format readable by your OpenMV Cam.

import sensor, image, pyb, time

record_time = 10000 # 10 seconds in milliseconds

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
img_writer = image.ImageWriter('/stream.bin')

# Red LED on means we are capturing frames.
red_led = pyb.LED(1)
red_led.on()
start = pyb.millis()
while pyb.elapsed_millis(start) < record_time:
    clock.tick()
    img = sensor.snapshot()
    # Modify the image if you feel like here...
    img_writer.add_frame(img)
    print(clock.fps())

img_writer.close()

# Blue LED on means we are done.
red_led.off()
blue_led = pyb.LED(3)
blue_led.on()

print('Done')
while(True):
    pyb.wfi()

Mjpeg
# MJPEG Video Recording Example
#
# Note: You will need an SD card to run this demo.
#
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
# the built-in video player will work too.

import sensor, image, time, mjpeg, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

clock = time.clock() # Tracks FPS.

pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user time to get ready.

pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()

m = mjpeg.Mjpeg('example.mjpeg')

print("You're on camera!")
for i in range(200):
    clock.tick()
    m.add_frame(sensor.snapshot())
    print(clock.fps())

m.close(clock.fps())
pyb.LED(BLUE_LED_PIN).off()
print('Done! Reset the camera to see the saved recording.')

Mjpeg_on_face_detection / mjpeg_on_movement

# Face Detection Example
#
# This example shows off the built-in face detection feature of the OpenMV Cam.
#
# Face detection works by using the Haar Cascade feature detector on an image. A
# Haar Cascade is a series of simple area contrast checks. For the built-in
# frontalface detector there are 25 stages of checks with each stage having
# hundreds of checks a piece. Haar Cascades run fast because later stages are
# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
# a data structure called the integral image to quickly execute each area
# contrast check in constant time (the reason for feature detection being
# grayscale only is because of the space requirement for the integral image).

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# HQVGA and GRAYSCALE are the best for face tracking.
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade('frontalface', stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find objects.
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)

    # Draw objects
    for r in objects:
        img.draw_rectangle(r)

    # Print FPS.
    # Note: Actual FPS is higher, streaming the FB makes it slower.
    print(clock.fps())

Face_recognition

# Face recognition with LBP descriptors.
# See Timo Ahonen's 'Face Recognition with Local Binary Patterns'.
#
# Before running the example:
# 1) Download the AT&T faces database:
#    http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
# 2) Extract and copy the orl_faces directory to the SD card root.

import sensor, time, image

SUB = 's2'
NUM_SUBJECTS = 5
NUM_SUBJECTS_IMGS = 10

img = image.Image('orl_faces/%s/1.pgm'%(SUB)).mask_ellipse()
d0 = img.find_lbp((0, 0, img.width(), img.height()))
img = None

print('')
for s in range(1, NUM_SUBJECTS+1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS+1):
        img = image.Image('orl_faces/s%d/%d.pgm'%(s, i)).mask_ellipse()
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    print('Average dist for subject %d: %d'%(s, dist/NUM_SUBJECTS_IMGS))

Face_tracking

# Face Tracking Example
#
# This example shows off using the keypoints feature of your OpenMV Cam to track
# a face after it has been detected by a Haar Cascade. The first part of this
# script finds a face in the image using the frontalface Haar Cascade.
# After which the script uses the keypoints feature to automatically learn your
# face and track it. Keypoints can be used to automatically track anything.
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_contrast(3)

sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor to settle down
sensor.skip_frames(time = 2000)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade('frontalface', stages=25)
print(face_cascade)

# First set of keypoints
kpts1 = None

# Find a face!
while (kpts1 == None):
    img = sensor.snapshot()
    img.draw_string(0, 0, 'Looking for a face...')
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
    if objects:
        # Expand the ROI by 31 pixels in every direction
        face = (objects[0][0]-31, objects[0][1]-31, objects[0][2]+31*2, objects[0][3]+31*2)
        # Extract keypoints using the detected face size as the ROI
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Draw keypoints
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
time.sleep(2000)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6] # c[6] contains the number of matches.
        if (match > 5):
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, 'matched:%d dt:%d'%(match, c[7]))

    # Draw FPS
    img.draw_string(0, 0, 'FPS:%.2f'%(clock.fps()))

Eye-Tracking / eye_detection

# Face Eye Detection Example
#
# This script uses the built-in frontalface detector to find a face and then
# the eyes within the face. If you want to determine the eye gaze please see the
# iris_detection script for an example on how to do that.

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade('frontalface', stages=25)
eyes_cascade = image.HaarCascade('eye', stages=24)
print(face_cascade, eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find a face!
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)

    # Draw faces
    for face in objects:
        img.draw_rectangle(face)
        # Now find eyes within each face.
        # Note: Use a higher threshold here (more detections) and lower scale (to find small objects)
        eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.2, roi=face)
        for e in eyes:
            img.draw_rectangle(e)

    # Print FPS.
    # Note: Actual FPS is higher, streaming the FB makes it slower.
    print(clock.fps())

Iris_detection

# Iris Detection 2 Example
#
# This example shows how to find the eye gaze (pupil detection) after finding

# the eyes in an image. This script uses the find_eyes function which determines
# the center point of roi that should contain a pupil. It does this by basically
# finding the center of the darkest area in the eye roi which is the pupil center.
#
# Note: This script does not detect a face first, use it with the telephoto lens.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)

# Set resolution to VGA.
sensor.set_framesize(sensor.VGA)

# Bin/Crop image to 200x100, which gives more details with less data to process
sensor.set_windowing((220, 190, 200, 100))

sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
eyes_cascade = image.HaarCascade('eye', stages=24)
print(eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
    # Find eyes!
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.5)

    # Find iris
    for e in eyes:
        iris = img.find_eye(e)
        img.draw_rectangle(e)
        img.draw_cross(iris[0], iris[1])

    # Print FPS.
    # Note: Actual FPS is higher, streaming the FB makes it slower.
    print(clock.fps())

Feature-Detection
# Edge detection with Canny:
#
# This example demonstrates the Canny edge detector.
import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.

sensor.set_gainceiling(8)

clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    # Use Canny edge detector
    img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) # Set the thresholds.
    # Faster simpler edge detection
    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while connected to the IDE.

Find_circles

# This example shows off how to find circles in the image using the Hough
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...

import sensor, image, time

sensor.reset()

sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...

    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...

    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.

    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
        print(c)

    print('FPS %f' % clock.fps())
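If the radius range of interest is known in advance, the search can be narrowed to cut false positives and speed up the transform. A hedged sketch of the loop above with radius limits (the r_min, r_max and r_step keywords exist on recent firmware; check your version's documentation):

    # Only accept circles with radii between 10 and 40 pixels, stepping by 2.
    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10,
                              r_margin = 10, r_min = 10, r_max = 40, r_step = 2):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))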

Find_line_segments

enable_lens_corr = False # turn on for straighter lines...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)

sensor.skip_frames(time = 2000)
clock = time.clock()

# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.

while(True):
    clock.tick()
    img = sensor.snapshot()
    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...

    # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
    # merging is done. At 1, any line 1 pixel away from another is merged... and so
    # on as you increase this value. You may wish to merge lines as line segment
    # detection produces a lot of line segment results.

    # `max_theta_diff` controls the maximum amount of rotation difference between
    # any two lines about to be merged. The default setting allows for 15 degrees.

    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
        img.draw_line(l.line(), color = (255, 0, 0))
        # print(l)

    print('FPS %f' % clock.fps())

Lines
# Find Lines Example
#
# This example shows off how to find lines in the image. For each line object
# found in the image a line object is returned which includes the line's rotation.

# Note: Line detection is done by using the Hough Transform:
# http://en.wikipedia.org/wiki/Hough_transform
# Please read about it above for more information on what `theta` and `rho` are.

# find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines.

enable_lens_corr = False # turn on for straighter lines...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

# All line objects have a `theta()` method to get their rotation angle in degrees.
# You can filter lines based on their rotation angle.

min_degree = 0
max_degree = 179

# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.

while(True):
    clock.tick()
    img = sensor.snapshot()

    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...

    # `threshold` controls how many lines in the image are found. Only lines with
    # edge difference magnitude sums greater than `threshold` are detected...

    # More about `threshold` - each pixel in the image contributes a magnitude value
    # to a line. The sum of all contributions is the magnitude for that line. Then
    # when lines are merged their magnitudes are added together. Note that `threshold`
    # filters out lines with low magnitudes before merging. To see the magnitude of
    # un-merged lines set `theta_margin` and `rho_margin` to 0...

    # `theta_margin` and `rho_margin` control merging similar lines. If two lines'
    # theta and rho value differences are less than the margins then they are merged.

    for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25):
        if (min_degree <= l.theta()) and (l.theta() <= max_degree):
            img.draw_line(l.line(), color = (255, 0, 0))
            # print(l)

    print('FPS %f' % clock.fps())

# About negative rho values:
#
# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
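For reference, a (rho, theta) pair describes the infinite line rho = x*cos(theta) + y*sin(theta), where rho is the perpendicular distance from the image origin and theta is the angle of that perpendicular; this is also why negating rho is the same as rotating theta by 180 degrees. A small illustrative helper in plain Python (not part of the OpenMV API) that turns such a pair into a 4-value tuple suitable for draw_line():

import math

def rho_theta_to_segment(rho, theta_deg, half_len=200):
    # The point on the line closest to the origin is (rho*cos(t), rho*sin(t));
    # the line itself runs perpendicular to that normal direction.
    t = math.radians(theta_deg)
    x0, y0 = rho * math.cos(t), rho * math.sin(t)
    dx, dy = -math.sin(t), math.cos(t)
    return (int(x0 - dx * half_len), int(y0 - dy * half_len),
            int(x0 + dx * half_len), int(y0 + dy * half_len))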

Find_numbers
# LeNet Example
import sensor, image, time

sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.VGA) # Set frame size to VGA (640x480)
sensor.set_windowing((128, 128)) # Set 128x128 window.
sensor.skip_frames(time = 2000) # Wait for settings to take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

while(True):
    img = sensor.snapshot()
    # NOTE: Invert to detect dark numbers on a white background.
    img.invert()
    out = img.find_number(roi=(img.width()//2-14, img.height()//2-14, 28, 28))
    img.draw_rectangle((img.width()//2-15, img.height()//2-15, 30, 30))
    if out[1] > 5: # Confidence level
        print('Number: %d Confidence: %0.2f' %(out[0], out[1]))

Find_rects
# Find Rects Example
#
# This example shows off how to find rectangles in the image using the quad threshold
# detection code from our April Tags code. The quad threshold detection algorithm
# detects rectangles in an extremely robust way and is much better than Hough
# Transform based methods. For example, it can still detect rectangles even when lens
# distortion causes those rectangles to look bent. Rounded rectangles are no problem!
# (But, given this the code will also detect small radius circles too)...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()

    # `threshold` below should be set to a high enough value to filter out noise
    # rectangles detected in the image which have low edge magnitudes. Rectangles
    # have larger edge magnitudes the larger and more contrasty they are...

    for r in img.find_rects(threshold = 10000):
        img.draw_rectangle(r.rect(), color = (255, 0, 0))
        for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
        print(r)

    print('FPS %f' % clock.fps())
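find_rects() can still return small, noisy quads above the edge-magnitude threshold; a cheap post-filter on the bounding-box area helps. A sketch of the loop above with such a filter added (the 400-pixel cutoff is an arbitrary example value):

    for r in img.find_rects(threshold = 10000):
        x, y, w, h = r.rect()
        if w * h < 400: # Skip quads smaller than roughly 20x20 pixels.
            continue
        img.draw_rectangle(r.rect(), color = (255, 0, 0))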

Hog
# Histogram of Oriented Gradients (HoG) Example
#
# This example demonstrates HoG visualization.
#
# Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the
# image without JPEG artifacts, uncomment the lines that save the image to uSD.

import sensor, image, time

sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(8)
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)

clock = time.clock() # Tracks FPS.
while (True):
    clock.tick()
    img = sensor.snapshot()
    img.find_hog()

    # Uncomment to save raw FB to file and exit the loop
    #img.save('/hog.pgm')
    #break

    print(clock.fps())

Keypoints
# Object tracking with keypoints example.
# Show the camera an object and then run the script. A set of keypoints will be extracted
# once and then tracked in the following frames. If you want a new set of keypoints re-run
# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)

def draw_keypoints(img, kpts):
    print(kpts)
    img.draw_keypoints(kpts)
    img = sensor.snapshot()
    time.sleep(1000)

kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
#kpts1 = image.load_descriptor('/desc.orb')
#img = sensor.snapshot()
#draw_keypoints(img, kpts1)

clock = time.clock()
while (True):
    clock.tick()
    img = sensor.snapshot()
    if (kpts1 == None):
        # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
        draw_keypoints(img, kpts1)
    else:
        # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to
        # extract keypoints from the first scale only, which will match one of the scales in the
        # first descriptor.
        kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)

        if (kpts2):
            match = image.match_descriptor(kpts1, kpts2, threshold=85)
            if (match.count() > 10):
                # If we have at least n 'good matches'
                # Draw bounding rectangle and cross.
                img.draw_rectangle(match.rect())
                img.draw_cross(match.cx(), match.cy(), size=10)

            print(kpts2, 'matched:%d dt:%d'%(match.count(), match.theta()))

            # NOTE: uncomment if you want to draw the keypoints
            #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True)

    # Draw FPS
    img.draw_string(0, 0, 'FPS:%.2f'%(clock.fps()))
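The commented-out image.load_descriptor() call above implies that a descriptor was saved in an earlier run. A minimal sketch of the matching save step, reusing the same '/desc.orb' path as the load example:

# Run once, after a good set of keypoints has been extracted:
img = sensor.snapshot()
kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)
if (kpts):
    image.save_descriptor('/desc.orb', kpts)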

LBP
# Local Binary Patterns (LBP) Example
#
# This example shows off how to use the local binary pattern feature descriptor
# on your OpenMV Cam. LBP descriptors work like Freak feature descriptors.
#
# WARNING: LBP support needs to be reworked! As of right now this feature needs
# a lot of work to be made into something useful. This script will remain to show
# that the functionality exists, but, in its current state is inadequate.

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade('frontalface', stages=25)
print(face_cascade)

# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    img = sensor.snapshot()
    img.draw_string(0, 0, 'Please wait...')

d0 = None
#d0 = image.load_descriptor('/desc.lbp')
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()

    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)

    if objects:
        face = objects[0]
        d1 = img.find_lbp(face)
        if (d0 == None):
            d0 = d1
        else:
            dist = image.match_descriptor(d0, d1)
            img.draw_string(0, 10, 'Match %d%%'%(dist))

        img.draw_rectangle(face)

    # Draw FPS
    img.draw_string(0, 0, 'FPS:%.2f'%(clock.fps()))
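As in the keypoints example, the commented-out load_descriptor() line implies a previously saved reference face. A minimal sketch of persisting the captured descriptor, assuming save_descriptor() accepts LBP descriptors on your firmware:

# Run once after d0 has been captured to keep the reference across reboots.
# (Assumption: save_descriptor() handles LBP descriptors on your firmware.)
if (d0):
    image.save_descriptor('/desc.lbp', d0)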

Linear_regression_fast
# Fast Linear Regression Example
#
# This example shows off how to use the get_regression() method on your OpenMV Cam
# to get the linear regression of a ROI. Using this method you can easily build
# a robot which can track lines which all point in the same general direction
# but are not actually connected. Use find_blobs() on lines that are nicely
# connected for better filtering options and control.

#
# This is called the fast linear regression because we use the least-squares
# method to fit the line. However, this method is NOT GOOD FOR ANY images that
# have a lot (or really any) outlier points which corrupt the line fit...

THRESHOLD = (0, 100) # Grayscale threshold for dark things...
BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
                      # is being run on... might lower FPS though.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to line objects returned by find_lines() and
    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It goes from
    # (0, INF] where 0 is returned for a circle. The more linear the
    # scene is the higher the magnitude.
    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD])

    if (line): img.draw_line(line.line(), color = 127)

    print('FPS %f, mag = %s' % (clock.fps(), str(line.magnitude()) if (line) else 'N/A'))

# About negative rho values:
#
# A [theta+0:-rho] tuple is the same as [theta+180:+rho].
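As the comments above note, this regression is aimed at line-following robots. A sketch of turning the fitted line into a signed steering error; the convention that 0 means "line runs straight ahead" is an assumption for illustration, not an API guarantee:

    if (line):
        # Fold theta() (0..179 degrees) into a signed error around 0, where
        # 0 is taken here to mean the line points straight ahead.
        t = line.theta()
        theta_err = t - 180 if t > 90 else t
        print('steering error: %d deg' % theta_err)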

Linear_regression_robust
# Robust Linear Regression Example
#
# This example shows off how to use the get_regression() method on your OpenMV Cam
# to get the linear regression of a ROI. Using this method you can easily build
# a robot which can track lines which all point in the same general direction
# but are not actually connected. Use find_blobs() on lines that are nicely
# connected for better filtering options and control.
#
# We're using the robust=True argument for get_regression() in this script which
# computes the linear regression using a much more robust algorithm... but potentially
# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!

THRESHOLD = (0, 100) # Grayscale threshold for dark things...
BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
                      # is being run on... might lower FPS though.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
sensor.skip_frames(time = 2000) # WARNING: If you use QQVGA it may take seconds
clock = time.clock() # to process a frame sometimes.

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to line objects returned by find_lines() and
    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It means something
    # different for the robust linear regression. In general, the larger the value the
    # better...
    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)

    if (line): img.draw_line(line.line(), color = 127)
    print('FPS %f, mag = %s' % (clock.fps(), str(line.magnitude()) if (line) else 'N/A'))

# About negative rho values:
#
# A [theta+0:-rho] tuple is the same as [theta+180:+rho].

Template_matching
# Template Matching Example - Normalized Cross Correlation (NCC)
#
# This example shows off how to use the NCC feature of your OpenMV Cam to match
# image patches to parts of an image... except for extremely controlled environments
# NCC is not all that useful.
#
# WARNING: NCC support needs to be reworked! As of right now this feature needs
# a lot of work to be made into something useful. This script will remain to show
# that the functionality exists, but, in its current state is inadequate.

import time, sensor, image
from image import SEARCH_EX, SEARCH_DS

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)

# Load template.
# Template should be a small (eg. 32x32 pixels) grayscale image.
template = image.Image('/template.pgm')

clock = time.clock()

# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
    #
    # Note1: ROI has to be smaller than the image and bigger than the template.
    # Note2: In diamond search, step and ROI are both ignored.
    r = img.find_template(template, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
    if r:
        img.draw_rectangle(r)

    print(clock.fps())
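find_template() matches one template per call, so checking several patches is just a loop over templates. A sketch, assuming extra templates were saved to the uSD card as '/t0.pgm' and '/t1.pgm' (hypothetical filenames):

# '/t0.pgm' and '/t1.pgm' are placeholder paths for your own templates.
templates = [image.Image(p) for p in ('/t0.pgm', '/t1.pgm')]

while (True):
    img = sensor.snapshot()
    for t in templates:
        r = img.find_template(t, 0.70, step=4, search=SEARCH_EX)
        if r:
            img.draw_rectangle(r)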
