Add the GUI
kivy/core/camera/__init__.py  (new file, 151 lines)
@@ -0,0 +1,151 @@
'''
Camera
======

Core class for acquiring the camera and converting its input into a
:class:`~kivy.graphics.texture.Texture`.

.. versionchanged:: 1.10.0
    The pygst and videocapture providers have been removed.

.. versionchanged:: 1.8.0
    There are now two distinct GStreamer implementations: one using Gi/Gst,
    working for both Python 2 and 3 with GStreamer 1.0, and one using PyGST,
    working only for Python 2 + GStreamer 0.10.
'''

__all__ = ('CameraBase', 'Camera')


from kivy.utils import platform
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.core import core_select_lib


class CameraBase(EventDispatcher):
    '''Abstract Camera Widget class.

    Concrete camera classes must implement initialization and
    frame capturing to a buffer that can be uploaded to the gpu.

    :Parameters:
        `index`: int
            Source index of the camera.
        `size`: tuple (int, int)
            Size at which the image is drawn. If no size is specified,
            it defaults to the resolution of the camera image.
        `resolution`: tuple (int, int)
            Resolution to try to request from the camera.
            Used in the gstreamer pipeline by forcing the appsink caps
            to this resolution. If the camera doesn't support the resolution,
            a negotiation error might be thrown.

    :Events:
        `on_load`
            Fired when the camera is loaded and the texture has become
            available.
        `on_texture`
            Fired each time the camera texture is updated.
    '''

    __events__ = ('on_load', 'on_texture')

    def __init__(self, **kwargs):
        kwargs.setdefault('stopped', False)
        kwargs.setdefault('resolution', (640, 480))
        kwargs.setdefault('index', 0)

        self.stopped = kwargs.get('stopped')
        self._resolution = kwargs.get('resolution')
        self._index = kwargs.get('index')
        self._buffer = None
        self._format = 'rgb'
        self._texture = None
        self.capture_device = None
        kwargs.setdefault('size', self._resolution)

        super(CameraBase, self).__init__()

        self.init_camera()

        if not self.stopped:
            self.start()

    def _set_resolution(self, res):
        self._resolution = res
        self.init_camera()

    def _get_resolution(self):
        return self._resolution

    resolution = property(lambda self: self._get_resolution(),
                          lambda self, x: self._set_resolution(x),
                          doc='Resolution of camera capture (width, height)')

    def _set_index(self, x):
        if x == self._index:
            return
        self._index = x
        self.init_camera()

    def _get_index(self):
        return self._index

    index = property(lambda self: self._get_index(),
                     lambda self, x: self._set_index(x),
                     doc='Source index of the camera')

    def _get_texture(self):
        return self._texture
    texture = property(lambda self: self._get_texture(),
                       doc='Return the camera texture with the latest capture')

    def init_camera(self):
        '''Initialise the camera (internal)'''
        pass

    def start(self):
        '''Start the camera acquisition'''
        self.stopped = False

    def stop(self):
        '''Release the camera'''
        self.stopped = True

    def _update(self, dt):
        '''Update the camera (internal)'''
        pass

    def _copy_to_gpu(self):
        '''Copy the buffer into the texture'''
        if self._texture is None:
            Logger.debug('Camera: copy_to_gpu() failed, _texture is None!')
            return
        self._texture.blit_buffer(self._buffer, colorfmt=self._format)
        self._buffer = None
        self.dispatch('on_texture')

    def on_texture(self):
        pass

    def on_load(self):
        pass


# Load the appropriate providers
providers = ()

if platform in ['macosx', 'ios']:
    providers += (('avfoundation', 'camera_avfoundation',
                   'CameraAVFoundation'), )
elif platform == 'android':
    providers += (('android', 'camera_android', 'CameraAndroid'), )
else:
    providers += (('picamera', 'camera_picamera', 'CameraPiCamera'), )
    providers += (('gi', 'camera_gi', 'CameraGi'), )

providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )


Camera = core_select_lib('camera', providers)
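For orientation, a minimal usage sketch of the core interface above (illustrative only, not part of the commit; it assumes a provider such as OpenCV is installed and a Kivy GL context is available):

# Hypothetical example: use whichever provider core_select_lib() selected.
from kivy.core.camera import Camera

def on_frame(cam):
    # 'on_texture' fires after every _copy_to_gpu(); the texture is valid here.
    print('frame updated, texture size:', cam.texture.size)

cam = Camera(index=0, resolution=(640, 480), stopped=True)
cam.bind(on_texture=on_frame)   # events declared in CameraBase.__events__
cam.start()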
BIN  kivy/core/camera/__pycache__/__init__.cpython-310.pyc  (new file, binary not shown)
BIN  kivy/core/camera/__pycache__/camera_android.cpython-310.pyc  (new file, binary not shown)
BIN  kivy/core/camera/__pycache__/camera_gi.cpython-310.pyc  (new file, binary not shown)
BIN  kivy/core/camera/__pycache__/camera_opencv.cpython-310.pyc  (new file, binary not shown)
BIN  kivy/core/camera/__pycache__/camera_picamera.cpython-310.pyc  (new file, binary not shown)
kivy/core/camera/camera_android.py  (new file, 206 lines)
@@ -0,0 +1,206 @@
from jnius import autoclass, PythonJavaClass, java_method
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.graphics import Fbo, Callback, Rectangle
from kivy.core.camera import CameraBase
import threading


Camera = autoclass('android.hardware.Camera')
SurfaceTexture = autoclass('android.graphics.SurfaceTexture')
GL_TEXTURE_EXTERNAL_OES = autoclass(
    'android.opengl.GLES11Ext').GL_TEXTURE_EXTERNAL_OES
ImageFormat = autoclass('android.graphics.ImageFormat')


class PreviewCallback(PythonJavaClass):
    """
    Interface used to get back the preview frame of the Android Camera
    """
    __javainterfaces__ = ('android.hardware.Camera$PreviewCallback', )

    def __init__(self, callback):
        super(PreviewCallback, self).__init__()
        self._callback = callback

    @java_method('([BLandroid/hardware/Camera;)V')
    def onPreviewFrame(self, data, camera):
        self._callback(data, camera)


class CameraAndroid(CameraBase):
    """
    Implementation of CameraBase using Android API
    """

    _update_ev = None

    def __init__(self, **kwargs):
        self._android_camera = None
        self._preview_cb = PreviewCallback(self._on_preview_frame)
        self._buflock = threading.Lock()
        super(CameraAndroid, self).__init__(**kwargs)

    def __del__(self):
        self._release_camera()

    def init_camera(self):
        self._release_camera()
        self._android_camera = Camera.open(self._index)
        params = self._android_camera.getParameters()
        width, height = self._resolution
        params.setPreviewSize(width, height)
        supported_focus_modes = self._android_camera.getParameters() \
            .getSupportedFocusModes()
        if supported_focus_modes.contains('continuous-picture'):
            params.setFocusMode('continuous-picture')
        self._android_camera.setParameters(params)
        # self._android_camera.setDisplayOrientation()
        self.fps = 30.

        pf = params.getPreviewFormat()
        assert(pf == ImageFormat.NV21)  # default format is NV21
        self._bufsize = int(ImageFormat.getBitsPerPixel(pf) / 8. *
                            width * height)

        self._camera_texture = Texture(width=width, height=height,
                                       target=GL_TEXTURE_EXTERNAL_OES,
                                       colorfmt='rgba')
        self._surface_texture = SurfaceTexture(int(self._camera_texture.id))
        self._android_camera.setPreviewTexture(self._surface_texture)

        self._fbo = Fbo(size=self._resolution)
        self._fbo['resolution'] = (float(width), float(height))
        self._fbo.shader.fs = '''
            #extension GL_OES_EGL_image_external : require
            #ifdef GL_ES
                precision highp float;
            #endif

            /* Outputs from the vertex shader */
            varying vec4 frag_color;
            varying vec2 tex_coord0;

            /* uniform texture samplers */
            uniform sampler2D texture0;
            uniform samplerExternalOES texture1;
            uniform vec2 resolution;

            void main()
            {
                vec2 coord = vec2(tex_coord0.y * (
                    resolution.y / resolution.x), 1. - tex_coord0.x);
                gl_FragColor = texture2D(texture1, tex_coord0);
            }
        '''
        with self._fbo:
            self._texture_cb = Callback(lambda instr:
                                        self._camera_texture.bind)
            Rectangle(size=self._resolution)

    def _release_camera(self):
        if self._android_camera is None:
            return

        self.stop()
        self._android_camera.release()
        self._android_camera = None

        # clear texture and it'll be reset in `_update` pointing to new FBO
        self._texture = None
        del self._fbo, self._surface_texture, self._camera_texture

    def _on_preview_frame(self, data, camera):
        with self._buflock:
            if self._buffer is not None:
                # add buffer back for reuse
                self._android_camera.addCallbackBuffer(self._buffer)
            self._buffer = data
        # check if frame grabbing works
        # print self._buffer, len(self.frame_data)

    def _refresh_fbo(self):
        self._texture_cb.ask_update()
        self._fbo.draw()

    def start(self):
        super(CameraAndroid, self).start()

        with self._buflock:
            self._buffer = None
        for k in range(2):  # double buffer
            buf = b'\x00' * self._bufsize
            self._android_camera.addCallbackBuffer(buf)
        self._android_camera.setPreviewCallbackWithBuffer(self._preview_cb)

        self._android_camera.startPreview()
        if self._update_ev is not None:
            self._update_ev.cancel()
        self._update_ev = Clock.schedule_interval(self._update, 1 / self.fps)

    def stop(self):
        super(CameraAndroid, self).stop()
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
        self._android_camera.stopPreview()

        self._android_camera.setPreviewCallbackWithBuffer(None)
        # buffer queue cleared as well, to be recreated on next start
        with self._buflock:
            self._buffer = None

    def _update(self, dt):
        self._surface_texture.updateTexImage()
        self._refresh_fbo()
        if self._texture is None:
            self._texture = self._fbo.texture
            self.dispatch('on_load')
        self._copy_to_gpu()

    def _copy_to_gpu(self):
        """
        A dummy placeholder (the image is already in GPU) to be consistent
        with other providers.
        """
        self.dispatch('on_texture')

    def grab_frame(self):
        """
        Grab current frame (thread-safe, minimal overhead)
        """
        with self._buflock:
            if self._buffer is None:
                return None
            buf = self._buffer.tostring()
            return buf

    def decode_frame(self, buf):
        """
        Decode image data from grabbed frame.

        This method depends on OpenCV and NumPy - however it is only used for
        fetching the current frame as a NumPy array, and not required when
        this :class:`CameraAndroid` provider is simply used by a
        :class:`~kivy.uix.camera.Camera` widget.
        """
        import numpy as np
        from cv2 import cvtColor

        w, h = self._resolution
        # NV21: h rows of Y followed by h/2 rows of interleaved V/U
        arr = np.fromstring(buf, 'uint8').reshape((h + h // 2, w))
        arr = cvtColor(arr, 93)  # NV21 -> BGR
        return arr

    def read_frame(self):
        """
        Grab and decode frame in one call
        """
        return self.decode_frame(self.grab_frame())

    @staticmethod
    def get_camera_count():
        """
        Get the number of available cameras.
        """
        return Camera.getNumberOfCameras()
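As the decode_frame() docstring notes, NumPy and OpenCV are only needed when frames are pulled manually. A hypothetical sketch of that path (not part of the commit; assumes it runs on an Android device with numpy and cv2 available):

# Illustrative only: poll the latest preview frame, e.g. from a worker thread.
from kivy.core.camera.camera_android import CameraAndroid

cam = CameraAndroid(index=0, resolution=(640, 480))

def poll_once():
    raw = cam.grab_frame()        # thread-safe copy of the NV21 buffer, or None
    if raw is None:
        return None
    return cam.decode_frame(raw)  # (height, width, 3) uint8 BGR array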
kivy/core/camera/camera_gi.py  (new file, 170 lines)
@@ -0,0 +1,170 @@
'''
Gi Camera
=========

Implement CameraBase with Gi / Gstreamer, working on both Python 2 and 3
'''

__all__ = ('CameraGi', )

from gi.repository import Gst
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.core.camera import CameraBase
from kivy.support import install_gobject_iteration
from kivy.logger import Logger
from ctypes import Structure, c_void_p, c_int, string_at
from weakref import ref
import atexit

# initialize the camera/gi. if the older version is used, don't use camera_gi.
Gst.init(None)
version = Gst.version()
if version < (1, 0, 0, 0):
    raise Exception('Cannot use camera_gi, Gstreamer < 1.0 is not supported.')
Logger.info('CameraGi: Using Gstreamer {}'.format(
    '.'.join(['{}'.format(x) for x in Gst.version()])))
install_gobject_iteration()


class _MapInfo(Structure):
    _fields_ = [
        ('memory', c_void_p),
        ('flags', c_int),
        ('data', c_void_p)]
    # we don't care about the rest


def _on_cameragi_unref(obj):
    if obj in CameraGi._instances:
        CameraGi._instances.remove(obj)


class CameraGi(CameraBase):
    '''Implementation of CameraBase using GStreamer

    :Parameters:
        `video_src`: str, default is 'v4l2src'
            Other tested options are: 'dc1394src' for firewire
            dc camera (e.g. firefly MV). Any gstreamer video source
            should potentially work.
            Theoretically a longer string using "!" can be used
            describing the first part of a gstreamer pipeline.
    '''

    _instances = []

    def __init__(self, **kwargs):
        self._pipeline = None
        self._camerasink = None
        self._decodebin = None
        self._texturesize = None
        self._video_src = kwargs.get('video_src', 'v4l2src')
        wk = ref(self, _on_cameragi_unref)
        CameraGi._instances.append(wk)
        super(CameraGi, self).__init__(**kwargs)

    def init_camera(self):
        # TODO: This doesn't work when camera resolution is resized at runtime.
        # There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        video_src = self._video_src
        if video_src == 'v4l2src':
            video_src += ' device=/dev/video%d' % self._index
        elif video_src == 'dc1394src':
            video_src += ' camera-number=%d' % self._index

        if Gst.version() < (1, 0, 0, 0):
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            pl = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                  'appsink name=camerasink emit-signals=True caps={}')
        else:
            caps = 'video/x-raw,format=RGB'
            pl = '{} ! decodebin name=decoder ! videoconvert ! appsink ' + \
                 'name=camerasink emit-signals=True caps={}'

        self._pipeline = Gst.parse_launch(pl.format(video_src, caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start()

    def _gst_new_sample(self, *largs):
        sample = self._camerasink.emit('pull-sample')
        if sample is None:
            return False

        self._sample = sample

        if self._texturesize is None:
            # try to get the camera image size
            for pad in self._decodebin.srcpads:
                s = pad.get_current_caps().get_structure(0)
                self._texturesize = (
                    s.get_value('width'),
                    s.get_value('height'))
                Clock.schedule_once(self._update)
                return False

        Clock.schedule_once(self._update)
        return False

    def start(self):
        super(CameraGi, self).start()
        self._pipeline.set_state(Gst.State.PLAYING)

    def stop(self):
        super(CameraGi, self).stop()
        self._pipeline.set_state(Gst.State.PAUSED)

    def unload(self):
        self._pipeline.set_state(Gst.State.NULL)

    def _update(self, dt):
        sample, self._sample = self._sample, None
        if sample is None:
            return

        if self._texture is None and self._texturesize is not None:
            self._texture = Texture.create(
                size=self._texturesize, colorfmt='rgb')
            self._texture.flip_vertical()
            self.dispatch('on_load')

        # decode sample
        # read the data from the buffer memory
        try:
            buf = sample.get_buffer()
            result, mapinfo = buf.map(Gst.MapFlags.READ)

            # We cannot get the data out of mapinfo, using Gst 1.0.6 + Gi 3.8.0
            # related bug report:
            # https://bugzilla.gnome.org/show_bug.cgi?id=678663
            # ie: mapinfo.data is normally a char*, but here, we have an int
            # So right now, we use ctypes instead to read the mapinfo ourself.
            addr = mapinfo.__hash__()
            c_mapinfo = _MapInfo.from_address(addr)

            # now get the memory
            self._buffer = string_at(c_mapinfo.data, mapinfo.size)
            self._copy_to_gpu()
        finally:
            if mapinfo is not None:
                buf.unmap(mapinfo)


@atexit.register
def camera_gi_clean():
    # if we leave the python process with some video running, we can hit a
    # segfault. This is forcing the stop/unload of all remaining videos before
    # exiting the python process.
    for weakcamera in CameraGi._instances:
        camera = weakcamera()
        if isinstance(camera, CameraGi):
            camera.stop()
            camera.unload()
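The `video_src` parameter documented above accepts either a plain source element or the leading part of a pipeline that init_camera() completes with '! decodebin ... ! appsink'. A hypothetical sketch (not part of the commit; device availability and the exact source strings are assumptions):

# Illustrative only: two ways of selecting the capture source.
from kivy.core.camera.camera_gi import CameraGi

# documented alternative source: FireWire DC camera, first device
cam = CameraGi(video_src='dc1394src', index=0, resolution=(640, 480))

# longer pipeline head, passed through verbatim by init_camera()
cam2 = CameraGi(video_src='videotestsrc is-live=true', resolution=(320, 240))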
kivy/core/camera/camera_opencv.py  (new file, 163 lines)
@@ -0,0 +1,163 @@
'''
OpenCV Camera: Implement CameraBase with OpenCV
'''

#
# TODO: make usage of thread or multiprocess
#

from __future__ import division

__all__ = ('CameraOpenCV', )


from kivy.logger import Logger
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.core.camera import CameraBase

try:
    # opencv 1 case
    import opencv as cv

    try:
        import opencv.highgui as hg
    except ImportError:
        class Hg(object):
            '''
            On OSX, not only are the import names different,
            but the API also differs.
            There is no module called 'highgui' but the names are
            directly available in the 'cv' module.
            Some of them even have different names.

            Therefore we use this proxy object.
            '''

            def __getattr__(self, attr):
                if attr.startswith('cv'):
                    attr = attr[2:]
                got = getattr(cv, attr)
                return got

        hg = Hg()

except ImportError:
    # opencv 2 case (and also opencv 3, because it still uses cv2 module name)
    try:
        import cv2
        # here missing this OSX specific highgui thing.
        # I'm not on OSX so don't know if it is still valid in opencv >= 2
    except ImportError:
        raise


class CameraOpenCV(CameraBase):
    '''
    Implementation of CameraBase using OpenCV
    '''
    _update_ev = None

    def __init__(self, **kwargs):
        # we will need it, because constants have
        # different access paths between ver. 2 and 3
        try:
            self.opencvMajorVersion = int(cv.__version__[0])
        except NameError:
            self.opencvMajorVersion = int(cv2.__version__[0])

        self._device = None
        super(CameraOpenCV, self).__init__(**kwargs)

    def init_camera(self):
        # consts have changed locations between versions 2 and 3
        if self.opencvMajorVersion in (3, 4):
            PROPERTY_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.CAP_PROP_FPS
        elif self.opencvMajorVersion == 2:
            PROPERTY_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv2.cv.CV_CAP_PROP_FPS
        elif self.opencvMajorVersion == 1:
            PROPERTY_WIDTH = cv.CV_CAP_PROP_FRAME_WIDTH
            PROPERTY_HEIGHT = cv.CV_CAP_PROP_FRAME_HEIGHT
            PROPERTY_FPS = cv.CV_CAP_PROP_FPS

        Logger.debug('Using opencv ver.' + str(self.opencvMajorVersion))

        if self.opencvMajorVersion == 1:
            # create the device
            self._device = hg.cvCreateCameraCapture(self._index)
            # Set preferred resolution
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_WIDTH,
                                  self.resolution[0])
            cv.SetCaptureProperty(self._device, cv.CV_CAP_PROP_FRAME_HEIGHT,
                                  self.resolution[1])
            # and get frame to check if it's ok
            frame = hg.cvQueryFrame(self._device)
            # Just set the resolution to the frame we just got, but don't use
            # self.resolution for that as that would cause an infinite
            # recursion with self.init_camera (but slowly as we'd have to
            # always get a frame).
            self._resolution = (int(frame.width), int(frame.height))
            # get fps
            self.fps = cv.GetCaptureProperty(self._device, cv.CV_CAP_PROP_FPS)

        elif self.opencvMajorVersion in (2, 3, 4):
            # create the device
            self._device = cv2.VideoCapture(self._index)
            # Set preferred resolution
            self._device.set(PROPERTY_WIDTH,
                             self.resolution[0])
            self._device.set(PROPERTY_HEIGHT,
                             self.resolution[1])
            # and get frame to check if it's ok
            ret, frame = self._device.read()

            # source:
            # http://stackoverflow.com/questions/32468371/video-capture-propid-parameters-in-opencv  # noqa
            self._resolution = (int(frame.shape[1]), int(frame.shape[0]))
            # get fps
            self.fps = self._device.get(PROPERTY_FPS)

        if self.fps == 0 or self.fps == 1:
            self.fps = 1.0 / 30
        elif self.fps > 1:
            self.fps = 1.0 / self.fps

        if not self.stopped:
            self.start()

    def _update(self, dt):
        if self.stopped:
            return
        if self._texture is None:
            # Create the texture
            self._texture = Texture.create(self._resolution)
            self._texture.flip_vertical()
            self.dispatch('on_load')
        try:
            ret, frame = self._device.read()
            self._format = 'bgr'
            try:
                self._buffer = frame.imageData
            except AttributeError:
                # frame is already of type ndarray
                # which can be reshaped to 1-d.
                self._buffer = frame.reshape(-1)
            self._copy_to_gpu()
        except:
            Logger.exception('OpenCV: Couldn\'t get image from Camera')

    def start(self):
        super(CameraOpenCV, self).start()
        if self._update_ev is not None:
            self._update_ev.cancel()
        self._update_ev = Clock.schedule_interval(self._update, self.fps)

    def stop(self):
        super(CameraOpenCV, self).stop()
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
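One detail worth noting in the provider above: after init_camera(), self.fps holds the polling interval in seconds (1/fps), which Clock.schedule_interval() uses directly. A hypothetical sketch (not part of the commit; assumes cv2 is installed, a camera exists at index 0 and a Kivy GL context is available for the texture):

# Illustrative only: inspect what init_camera() negotiated.
from kivy.core.camera.camera_opencv import CameraOpenCV

cam = CameraOpenCV(index=0, resolution=(640, 480), stopped=True)
print(cam.resolution)   # actual size taken from the first grabbed frame
print(cam.fps)          # e.g. 0.0333... seconds between _update() calls
cam.start()             # schedules _update() every cam.fps seconds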
kivy/core/camera/camera_picamera.py  (new file, 96 lines)
@@ -0,0 +1,96 @@
'''
PiCamera Camera: Implement CameraBase with PiCamera
'''

#
# TODO: make usage of thread or multiprocess
#

__all__ = ('CameraPiCamera', )

from math import ceil

from kivy.logger import Logger
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.core.camera import CameraBase

from picamera import PiCamera
import numpy


class CameraPiCamera(CameraBase):
    '''Implementation of CameraBase using PiCamera
    '''
    _update_ev = None

    def __init__(self, **kwargs):
        self._camera = None
        self._format = 'bgr'
        self._framerate = kwargs.get('framerate', 30)
        super(CameraPiCamera, self).__init__(**kwargs)

    def init_camera(self):
        if self._camera is not None:
            self._camera.close()

        self._camera = PiCamera()
        self._camera.resolution = self.resolution
        self._camera.framerate = self._framerate
        self._camera.iso = 800

        self.fps = 1. / self._framerate

        if not self.stopped:
            self.start()

    def raw_buffer_size(self):
        '''Round buffer size up to 32x16 blocks.

        See https://picamera.readthedocs.io/en/release-1.13/recipes2.html#capturing-to-a-numpy-array
        '''  # noqa
        return (
            ceil(self.resolution[0] / 32.) * 32,
            ceil(self.resolution[1] / 16.) * 16
        )

    def _update(self, dt):
        if self.stopped:
            return

        if self._texture is None:
            # Create the texture
            self._texture = Texture.create(self._resolution)
            self._texture.flip_vertical()
            self.dispatch('on_load')

        try:
            bufsize = self.raw_buffer_size()
            output = numpy.empty(
                (bufsize[0] * bufsize[1] * 3,), dtype=numpy.uint8)
            self._camera.capture(output, self._format, use_video_port=True)

            # Trim the buffer to fit the actual requested resolution.
            # TODO: Is there a simpler way to do all this reshuffling?
            output = output.reshape((bufsize[0], bufsize[1], 3))
            output = output[:self.resolution[0], :self.resolution[1], :]
            self._buffer = output.reshape(
                (self.resolution[0] * self.resolution[1] * 3,))

            self._copy_to_gpu()
        except KeyboardInterrupt:
            raise
        except Exception:
            Logger.exception('PiCamera: Couldn\'t get image from Camera')

    def start(self):
        super(CameraPiCamera, self).start()
        if self._update_ev is not None:
            self._update_ev.cancel()
        self._update_ev = Clock.schedule_interval(self._update, self.fps)

    def stop(self):
        super(CameraPiCamera, self).stop()
        if self._update_ev is not None:
            self._update_ev.cancel()
            self._update_ev = None
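A worked example of the raw_buffer_size() rounding described above (illustrative only, not part of the commit): the Pi's GPU pads captures to 32-pixel-wide by 16-pixel-high blocks, so a 1920x1080 request is captured into a 1920x1088 buffer and then trimmed back to the requested resolution in _update().

from math import ceil

width, height = 1920, 1080
padded = (ceil(width / 32.) * 32, ceil(height / 16.) * 16)
assert padded == (1920, 1088)   # 1080 rounds up to the next multiple of 16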