Fast snapshot with new GL preview. Take snapshots while taking video

v2
Mattia Iavarone 6 years ago
parent c6d9f18ed7
commit c669308ba8
  1. 13
      MIGRATION.md
  2. 41
      cameraview/src/main/gles/com/otaliastudios/cameraview/EglBaseSurface.java
  3. 2
      cameraview/src/main/gles/com/otaliastudios/cameraview/EglCore.java
  4. 63
      cameraview/src/main/gles/com/otaliastudios/cameraview/EglViewport.java
  5. 8
      cameraview/src/main/gles/com/otaliastudios/cameraview/EglWindowSurface.java
  6. 2
      cameraview/src/main/gles/com/otaliastudios/cameraview/VideoTextureEncoder.java
  7. 7
      cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java
  8. 108
      cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotPictureRecorder.java
  9. 4
      cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java
  10. 6
      cameraview/src/main/options/com/otaliastudios/cameraview/Preview.java
  11. 236
      cameraview/src/main/views/com/otaliastudios/cameraview/GLCameraPreview.java

@ -32,7 +32,7 @@
- VideoSizeSelector: added. It is needed to choose the capture size in VIDEO mode.
Defaults to SizeSelectors.biggest(), but you can choose by aspect ratio or whatever.
- isTakingPicture(): added on top of isTakingVideo().
- takeVideoSnapshot(): new api. Requires the experimental flag, API 18 or it will throw.
- takeVideoSnapshot(): new API. Requires API 18 and the GL preview, or it will throw.
Respects orientation, videocodec and max duration limit.
Automatically rotates the data. Automatically crops the video.
NO audio support.
@ -41,10 +41,11 @@
The default is GlSurfaceView and it is highly recommended that you do not change this.
- New pictureRecorder interface for picture capturing.
- Created FullPictureRecorder and SnapshotPictureRecorder for capturing HQ pictures and snapshots.
TODO: cameraPreview documentation
TODO: takeVideoSnapshot documentation
- When preview is GlSurface, the SnapshotPictureRecorder will use the gl texture and draw it into JPEG.
This is really fast and allows us to avoid RotationHelper, creating bitmap copies, OOMs, EXIF stuff.
- When preview is GlSurface, you can take snapshots while recording video (or video snapshots!).
TODO: document this
- TODO: cameraPreview documentation
- TODO: takeVideoSnapshot documentation
TODO: add audio to the video snapshots
TODO: improve SnapshotPictureRecorder so that, if preview is GL, we catch the preview through GLES drawing
this would finally remove the RotationHelper and OOMs!

@ -25,6 +25,7 @@ import android.support.annotation.RequiresApi;
import android.util.Log;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@ -157,7 +158,7 @@ class EglBaseSurface extends EglElement {
* <p>
* Expects that this object's EGL surface is current.
*/
public void saveFrame(File file) throws IOException {
public void saveFrameToFile(File file) throws IOException {
if (!mEglCore.isCurrent(mEGLSurface)) {
throw new RuntimeException("Expected EGL context/surface is not current");
}
@ -196,4 +197,42 @@ class EglBaseSurface extends EglElement {
if (bos != null) bos.close();
}
}
/**
 * Saves the EGL surface to a JPEG byte array.
 * <p>
 * Expects that this object's EGL surface is current.
 *
 * @return the JPEG-compressed contents of the current surface
 */
public byte[] saveFrameToJpeg() {
    if (!mEglCore.isCurrent(mEGLSurface)) {
        throw new RuntimeException("Expected EGL context/surface is not current");
    }

    // glReadPixels fills in a "direct" ByteBuffer with what is essentially big-endian RGBA
    // data (i.e. a byte of red, followed by a byte of green...). While the Bitmap
    // constructor that takes an int[] wants little-endian ARGB (blue/red swapped), the
    // Bitmap "copy pixels" method wants the same format GL provides.
    //
    // Ideally we'd have some way to re-use the ByteBuffer, especially if we're calling
    // here often.
    //
    // Making this even more interesting is the upside-down nature of GL, which means
    // our output will look upside down relative to what appears on screen if the
    // typical GL conventions are used.
    int width = getWidth();
    int height = getHeight();
    int byteCount = width * height * 4; // RGBA, 4 bytes per pixel.
    ByteBuffer buf = ByteBuffer.allocateDirect(byteCount);
    buf.order(ByteOrder.LITTLE_ENDIAN);
    GLES20.glReadPixels(0, 0, width, height, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, buf);
    check("glReadPixels");
    buf.rewind();

    // NOTE: do not use buf.array() to size the stream. array() is an optional
    // ByteBuffer operation and direct buffers are not required to expose a backing
    // array; the explicit byte count is always correct.
    ByteArrayOutputStream bos = new ByteArrayOutputStream(byteCount);
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    bmp.copyPixelsFromBuffer(buf);
    bmp.compress(Bitmap.CompressFormat.JPEG, 90, bos);
    bmp.recycle();
    return bos.toByteArray();
}
}

@ -28,6 +28,8 @@ import android.support.annotation.RequiresApi;
import android.util.Log;
import android.view.Surface;
import javax.microedition.khronos.egl.EGL10;
/**
* -- from grafika --
*

@ -56,20 +56,21 @@ class EglViewport extends EglElement {
};
// Stuff from Drawable2d.FULL_RECTANGLE
private FloatBuffer mVertexArray = floatBuffer(FULL_RECTANGLE_COORDS);
private FloatBuffer mTexCoordArray = floatBuffer(FULL_RECTANGLE_TEX_COORDS);
private FloatBuffer mVertexCoordinatesArray = floatBuffer(FULL_RECTANGLE_COORDS);
private FloatBuffer mTextureCoordinatesArray = floatBuffer(FULL_RECTANGLE_TEX_COORDS);
private int mVertexCount = FULL_RECTANGLE_COORDS.length / 2;
private final int mCoordsPerVertex = 2;
private final int mCoordinatesPerVertex = 2;
private final int mVertexStride = 8;
private final int mTexCoordStride = 8;
private final int mTextureStride = 8;
// Stuff from Texture2dProgram
private int mProgramHandle;
private int mTextureTarget;
private int muMVPMatrixLoc;
private int muTexMatrixLoc;
private int maPositionLoc;
private int maTextureCoordLoc;
// Program attributes
private int muMVPMatrixLocation;
private int muTexMatrixLocation;
private int maPositionLocation;
private int maTextureCoordLocation;
// private int muKernelLoc; // Used for filtering
// private int muTexOffsetLoc; // Used for filtering
@ -78,14 +79,14 @@ class EglViewport extends EglElement {
EglViewport() {
mTextureTarget = GLES11Ext.GL_TEXTURE_EXTERNAL_OES;
mProgramHandle = createProgram(SIMPLE_VERTEX_SHADER, SIMPLE_FRAGMENT_SHADER);
maPositionLoc = GLES20.glGetAttribLocation(mProgramHandle, "aPosition");
checkLocation(maPositionLoc, "aPosition");
maTextureCoordLoc = GLES20.glGetAttribLocation(mProgramHandle, "aTextureCoord");
checkLocation(maTextureCoordLoc, "aTextureCoord");
muMVPMatrixLoc = GLES20.glGetUniformLocation(mProgramHandle, "uMVPMatrix");
checkLocation(muMVPMatrixLoc, "uMVPMatrix");
muTexMatrixLoc = GLES20.glGetUniformLocation(mProgramHandle, "uTexMatrix");
checkLocation(muTexMatrixLoc, "uTexMatrix");
maPositionLocation = GLES20.glGetAttribLocation(mProgramHandle, "aPosition");
checkLocation(maPositionLocation, "aPosition");
maTextureCoordLocation = GLES20.glGetAttribLocation(mProgramHandle, "aTextureCoord");
checkLocation(maTextureCoordLocation, "aTextureCoord");
muMVPMatrixLocation = GLES20.glGetUniformLocation(mProgramHandle, "uMVPMatrix");
checkLocation(muMVPMatrixLocation, "uMVPMatrix");
muTexMatrixLocation = GLES20.glGetUniformLocation(mProgramHandle, "uTexMatrix");
checkLocation(muTexMatrixLocation, "uTexMatrix");
// Stuff from Drawable2d.FULL_RECTANGLE
@ -122,15 +123,15 @@ class EglViewport extends EglElement {
return texId;
}
void drawFrame(int textureId, float[] texMatrix) {
drawFrame(textureId, texMatrix,
IDENTITY_MATRIX, mVertexArray, 0,
mVertexCount, mCoordsPerVertex,
mVertexStride, mTexCoordArray,
mTexCoordStride);
void drawFrame(int textureId, float[] textureMatrix) {
drawFrame(textureId, textureMatrix,
IDENTITY_MATRIX, mVertexCoordinatesArray, 0,
mVertexCount, mCoordinatesPerVertex,
mVertexStride, mTextureCoordinatesArray,
mTextureStride);
}
private void drawFrame(int textureId, float[] texMatrix,
private void drawFrame(int textureId, float[] textureMatrix,
float[] mvpMatrix, FloatBuffer vertexBuffer, int firstVertex,
int vertexCount, int coordsPerVertex, int vertexStride,
FloatBuffer texBuffer, int texStride) {
@ -145,28 +146,28 @@ class EglViewport extends EglElement {
GLES20.glBindTexture(mTextureTarget, textureId);
// Copy the model / view / projection matrix over.
GLES20.glUniformMatrix4fv(muMVPMatrixLoc, 1, false, mvpMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixLocation, 1, false, mvpMatrix, 0);
check("glUniformMatrix4fv");
// Copy the texture transformation matrix over.
GLES20.glUniformMatrix4fv(muTexMatrixLoc, 1, false, texMatrix, 0);
GLES20.glUniformMatrix4fv(muTexMatrixLocation, 1, false, textureMatrix, 0);
check("glUniformMatrix4fv");
// Enable the "aPosition" vertex attribute.
GLES20.glEnableVertexAttribArray(maPositionLoc);
GLES20.glEnableVertexAttribArray(maPositionLocation);
check("glEnableVertexAttribArray");
// Connect vertexBuffer to "aPosition".
GLES20.glVertexAttribPointer(maPositionLoc, coordsPerVertex,
GLES20.glVertexAttribPointer(maPositionLocation, coordsPerVertex,
GLES20.GL_FLOAT, false, vertexStride, vertexBuffer);
check("glVertexAttribPointer");
// Enable the "aTextureCoord" vertex attribute.
GLES20.glEnableVertexAttribArray(maTextureCoordLoc);
GLES20.glEnableVertexAttribArray(maTextureCoordLocation);
check("glEnableVertexAttribArray");
// Connect texBuffer to "aTextureCoord".
GLES20.glVertexAttribPointer(maTextureCoordLoc, 2,
GLES20.glVertexAttribPointer(maTextureCoordLocation, 2,
GLES20.GL_FLOAT, false, texStride, texBuffer);
check("glVertexAttribPointer");
@ -175,8 +176,8 @@ class EglViewport extends EglElement {
check("glDrawArrays");
// Done -- disable vertex array, texture, and program.
GLES20.glDisableVertexAttribArray(maPositionLoc);
GLES20.glDisableVertexAttribArray(maTextureCoordLoc);
GLES20.glDisableVertexAttribArray(maPositionLocation);
GLES20.glDisableVertexAttribArray(maTextureCoordLocation);
GLES20.glBindTexture(mTextureTarget, 0);
GLES20.glUseProgram(0);
}

@ -54,6 +54,14 @@ class EglWindowSurface extends EglBaseSurface {
createWindowSurface(surfaceTexture);
}
/**
 * Associates an EGL surface with the given {@link Surface}.
 * <p>
 * Counterpart of the SurfaceTexture-based constructor: accepts a plain Surface
 * and delegates the actual EGL window surface creation to createWindowSurface.
 */
public EglWindowSurface(EglCore eglCore, Surface surface) {
    super(eglCore);
    createWindowSurface(surface);
}
/**
* Releases any resources associated with the EGL surface (and, if configured to do so,
* with the Surface as well).

@ -157,7 +157,7 @@ class VideoTextureEncoder implements Runnable {
mInputWindowSurface = null;
}
if (mFullScreen != null) {
mFullScreen.release(false);
mFullScreen.release(true);
mFullScreen = null;
}
if (mEglCore != null) {

@ -527,6 +527,8 @@ class Camera1 extends CameraController implements Camera.PreviewCallback, Camera
@Override
public void run() {
if (mMode == Mode.VIDEO) {
// Could redirect to takePictureSnapshot, but it's better if people know
// what they are doing.
throw new IllegalStateException("Can't take hq pictures while in VIDEO mode");
}
@ -550,11 +552,6 @@ class Camera1 extends CameraController implements Camera.PreviewCallback, Camera
schedule(null, true, new Runnable() {
@Override
public void run() {
if (isTakingVideo()) {
// TODO v2: what to do here?
return;
}
LOG.v("takePictureSnapshot: performing.", isTakingPicture());
if (isTakingPicture()) return;

@ -1,12 +1,27 @@
package com.otaliastudios.cameraview;
import android.annotation.SuppressLint;
import android.graphics.Bitmap;
import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.graphics.SurfaceTexture;
import android.graphics.YuvImage;
import android.hardware.Camera;
import android.opengl.EGL14;
import android.opengl.EGLContext;
import android.opengl.EGLDisplay;
import android.opengl.EGLSurface;
import android.opengl.GLES11Ext;
import android.opengl.GLES20;
import android.opengl.GLSurfaceView;
import android.opengl.Matrix;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* A {@link PictureResult} that uses standard APIs.
@ -23,12 +38,12 @@ class SnapshotPictureRecorder extends PictureRecorder {
private Size mSensorPreviewSize;
private int mFormat;
SnapshotPictureRecorder(PictureResult stub, Camera1 controller, Camera camera, AspectRatio viewRatio) {
SnapshotPictureRecorder(PictureResult stub, Camera1 controller, Camera camera, AspectRatio outputRatio) {
super(stub, controller);
mController = controller;
mPreview = controller.mPreview;
mCamera = camera;
mOutputRatio = viewRatio;
mOutputRatio = outputRatio;
mFormat = mController.mPreviewFormat;
mSensorPreviewSize = mController.mPreviewSize;
}
@ -42,11 +57,94 @@ class SnapshotPictureRecorder extends PictureRecorder {
}
}
private void takeGl(GLCameraPreview preview) {
// TODO implement.
takeLegacy();
/**
 * Takes a snapshot using the GL preview: we grab the GL texture that backs the
 * preview, then redraw its latest frame into a separate EGL window surface where
 * we can apply crop/rotation transforms and read pixels into a JPEG.
 * This avoids YUV decoding, bitmap rotation copies and related OOMs.
 */
@SuppressLint("NewApi")
private void takeGl(final GLCameraPreview preview) {
    preview.addRendererFrameCallback(new GLCameraPreview.RendererFrameCallback() {

        int mTextureId;
        SurfaceTexture mSurfaceTexture;
        float[] mTransform;

        @Override // was missing; declared by RendererFrameCallback
        public void onRendererTextureCreated(int textureId) {
            mTextureId = textureId;
            // singleBufferMode = true: we consume frames manually below.
            mSurfaceTexture = new SurfaceTexture(mTextureId, true);
            // Need to crop the size.
            Rect crop = CropHelper.computeCrop(mResult.size, mOutputRatio);
            mResult.size = new Size(crop.width(), crop.height());
            mSurfaceTexture.setDefaultBufferSize(mResult.size.getWidth(), mResult.size.getHeight());
            mTransform = new float[16];
        }

        @Override
        public void onRendererFrame(SurfaceTexture surfaceTexture, final float scaleX, final float scaleY) {
            // One-shot: stop listening right away.
            preview.removeRendererFrameCallback(this);

            // Reading pixels straight from the renderer surface would work, but:
            // - output is upside down due to coordinates in GL: need to flip the byte[] someway
            // - output is not rotated as we would like to: need to create a bitmap copy...
            // - works only in the renderer thread, where it allocates the buffer and reads pixels.
            // For these reasons it is better to create a new surface and draw the
            // last frame again there, with the transformations we need.
            final EGLContext eglContext = EGL14.eglGetCurrentContext();
            final EglCore core = new EglCore(eglContext, EglCore.FLAG_RECORDABLE);
            WorkerHandler.run(new Runnable() {
                @Override
                public void run() {
                    EglWindowSurface surface = new EglWindowSurface(core, mSurfaceTexture);
                    surface.makeCurrent();
                    EglViewport viewport = new EglViewport();
                    mSurfaceTexture.updateTexImage();
                    mSurfaceTexture.getTransformMatrix(mTransform);

                    // Apply scale and crop:
                    // NOTE: scaleX and scaleY are in REF_VIEW, while our input appears to be in REF_SENSOR.
                    boolean flip = mController.flip(CameraController.REF_VIEW, CameraController.REF_SENSOR);
                    float realScaleX = flip ? scaleY : scaleX;
                    float realScaleY = flip ? scaleX : scaleY;
                    float scaleTranslX = (1F - realScaleX) / 2F;
                    float scaleTranslY = (1F - realScaleY) / 2F;
                    Matrix.translateM(mTransform, 0, scaleTranslX, scaleTranslY, 0);
                    Matrix.scaleM(mTransform, 0, realScaleX, realScaleY, 1);

                    // Apply rotation, pivoting around the texture center.
                    // NOTE(review): not sure why the minus sign is needed — confirm.
                    int rotation = -mResult.rotation;
                    mResult.rotation = 0;
                    Matrix.translateM(mTransform, 0, 0.5F, 0.5F, 0);
                    Matrix.rotateM(mTransform, 0, rotation, 0, 0, 1);
                    Matrix.translateM(mTransform, 0, -0.5F, -0.5F, 0);

                    // Draw and read. Do NOT call surface.swapBuffers() here:
                    // we read from the current buffer without presenting it.
                    viewport.drawFrame(mTextureId, mTransform);
                    mResult.jpeg = surface.saveFrameToJpeg();
                    mSurfaceTexture.releaseTexImage();

                    // Cleanup.
                    surface.release();
                    viewport.release();
                    mSurfaceTexture.release();
                    core.release();
                    dispatchResult();
                }
            });
        }
    });
}
private void takeLegacy() {
mCamera.setOneShotPreviewCallback(new Camera.PreviewCallback() {
@Override

@ -33,7 +33,7 @@ class SnapshotVideoRecorder extends VideoRecorder implements GLCameraPreview.Ren
super(stub, listener);
mEncoder = new VideoTextureEncoder();
mPreview = preview;
mPreview.setRendererFrameCallback(this);
mPreview.addRendererFrameCallback(this);
}
@Override
@ -109,7 +109,7 @@ class SnapshotVideoRecorder extends VideoRecorder implements GLCameraPreview.Ren
mCurrentState = STATE_NOT_RECORDING;
mEncoder = null;
mPreview.setRendererFrameCallback(null);
mPreview.removeRendererFrameCallback(SnapshotVideoRecorder.this);
mPreview = null;
}

@ -16,13 +16,15 @@ public enum Preview implements Control {
/**
* Preview engine based on {@link android.view.TextureView}.
* Stable, but does not support all features (like video snapshots).
* Stable, but does not support all features (like video snapshots,
* or picture snapshot while taking videos).
*/
TEXTURE(1),
/**
* Preview engine based on {@link android.opengl.GLSurfaceView}.
* This is the best engine available.
* This is the best engine available. Supports video snapshots,
* and picture snapshots while taking videos.
*/
GL_SURFACE(2);

@ -11,7 +11,14 @@ import android.view.SurfaceHolder;
import android.view.View;
import android.view.ViewGroup;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.microedition.khronos.egl.EGL10;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.egl.EGLDisplay;
import javax.microedition.khronos.egl.EGLSurface;
import javax.microedition.khronos.opengles.GL10;
/**
@ -19,42 +26,43 @@ import javax.microedition.khronos.opengles.GL10;
*
* - in the SurfaceTexture constructor we pass the GL texture handle that we have created.
*
* - The SurfaceTexture is linked to the Camera1 object. It will pass down buffers of data with
* a specified size (that is, the Camera1 preview size).
*
* - When SurfaceTexture.updateTexImage() is called, it will take the latest image from the camera stream
* and update it into the GL texture that was passed.
* - The SurfaceTexture is linked to the Camera1 object. The camera will pass down buffers of data with
* a specified size (that is, the Camera1 preview size). For this reason we don't have to specify
* surfaceTexture.setDefaultBufferSize() (like we do, for example, in SnapshotPictureRecorder).
*
* - Now we have a GL texture referencing data. It must be drawn.
* [Note: it must be drawn using a transformation matrix taken from SurfaceTexture]
* - When SurfaceTexture.updateTexImage() is called, it will fetch the latest texture image from the
* camera stream and assign it to the GL texture that was passed.
* Now the GL texture must be drawn using draw* APIs. The SurfaceTexture will also give us
* the transformation matrix to be applied.
*
* - The easy way to render an OpenGL texture is using the {@link GLSurfaceView} class.
* It manages the gl context, hosts a surface and runs a separated rendering thread that will perform
* It manages the GL context, hosts a surface and runs a separated rendering thread that will perform
* the rendering.
*
* - As per docs, we ask the GLSurfaceView to delegate rendering to us, using
* {@link GLSurfaceView#setRenderer(GLSurfaceView.Renderer)}. We request a render on the SurfaceView
* anytime the SurfaceTexture notifies that it has new data available (see OnFrameAvailableListener below).
*
* - Everything is linked:
* - So in short:
* - The SurfaceTexture has buffers of data of mInputStreamSize
* - The SurfaceView hosts a view (and surface) of size mOutputSurfaceSize
* - The SurfaceView hosts a view (and a surface) of size mOutputSurfaceSize.
* These are determined by the CameraView.onMeasure method.
* - We have a GL rich texture to be drawn (in the given method & thread).
*
* TODO
* CROPPING: Managed to do this using Matrix transformation.
* UPDATING: Managed to work using view.onPause and onResume.
* TAKING PICTURES: Sometime the snapshot takes ages... Can't reproduce anymore. Cool.
* TAKING VIDEOS: Still have not tried...
* This class will provide rendering callbacks to anyone who registers a {@link RendererFrameCallback}.
* Callbacks are guaranteed to be called on the renderer thread, which means that we can fetch
* the GL context that was created and is managed by the {@link GLSurfaceView}.
*/
class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> implements GLSurfaceView.Renderer {
private boolean mDispatched;
private final float[] mTransformMatrix = new float[16];
private int mOutputTextureId = -1;
private int mOutputTextureId = 0;
private SurfaceTexture mInputSurfaceTexture;
private EglViewport mOutputViewport;
private RendererFrameCallback mRendererFrameCallback;
private Set<RendererFrameCallback> mRendererFrameCallbacks = Collections.synchronizedSet(new HashSet<RendererFrameCallback>());
/* for tests */ float mScaleX = 1F;
/* for tests */ float mScaleY = 1F;
GLCameraPreview(Context context, ViewGroup parent, SurfaceCallback callback) {
super(context, parent, callback);
@ -97,10 +105,8 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
@Override
void onDestroy() {
super.onDestroy();
releaseInputSurfaceTexture();
}
private void releaseInputSurfaceTexture() {
// View is gone, so EGL context is gone: callbacks make no sense anymore.
mRendererFrameCallbacks.clear();
if (mInputSurfaceTexture != null) {
mInputSurfaceTexture.setOnFrameAvailableListener(null);
mInputSurfaceTexture.release();
@ -113,28 +119,31 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
}
}
private void createInputSurfaceTexture() {
// Renderer thread
@Override
public void onSurfaceCreated(GL10 gl, EGLConfig config) {
mOutputViewport = new EglViewport();
mOutputTextureId = mOutputViewport.createTexture();
mInputSurfaceTexture = new SurfaceTexture(mOutputTextureId);
getView().queueEvent(new Runnable() {
@Override
public void run() {
for (RendererFrameCallback callback : mRendererFrameCallbacks) {
callback.onRendererTextureCreated(mOutputTextureId);
}
}
});
// Since we are using GLSurfaceView.RENDERMODE_WHEN_DIRTY, we must notify the SurfaceView
// of dirtyness, so that it draws again. This is how it's done.
mInputSurfaceTexture.setOnFrameAvailableListener(new SurfaceTexture.OnFrameAvailableListener() {
@Override
public void onFrameAvailable(SurfaceTexture surfaceTexture) {
// requestRender is thread-safe.
getView().requestRender();
getView().requestRender(); // requestRender is thread-safe.
}
});
}
// Renderer thread
@Override
public void onSurfaceCreated(GL10 gl, EGLConfig config) {
createInputSurfaceTexture();
}
// Renderer thread
@SuppressWarnings("StatementWithEmptyBody")
@Override
@ -143,27 +152,27 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
dispatchOnOutputSurfaceAvailable(width, height);
mDispatched = true;
} else if (mOutputSurfaceWidth == width && mOutputSurfaceHeight == height) {
// This change can be triggered by ourselves (see below). Ignore.
// I was experimenting and this was happening.
// Not sure if it is still needed now.
} else {
// With other CameraPreview implementation we could just dispatch the 'size changed' event
// to the controller and everything would go straight. In case of GL, apparently we have to:
// - create a new texture (release the old)
// - unbind camera and surface
// - stop camera preview
// - recreate the GL context using view.onPause() and onResume()
// ...
onSizeChangeImplementation4(width, height);
// to the controller and everything would go straight. In case of GL, apparently we have to
// force recreate the EGLContext by calling onPause and onResume in the UI thread.
dispatchOnOutputSurfaceDestroyed();
getView().post(new Runnable() {
@Override
public void run() {
getView().onPause();
getView().onResume();
dispatchOnOutputSurfaceAvailable(width, height);
}
});
}
}
// Renderer thread
@Override
public void onDrawFrame(GL10 gl) {
// This are only needed with some implementations,
// and implementation4 seems to work well without them.
// if (mInputSurfaceTexture == null) return;
// if (mOutputViewport == null) return;
// Latch the latest frame. If there isn't anything new,
// we'll just re-use whatever was there before.
mInputSurfaceTexture.updateTexImage();
@ -172,24 +181,21 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
return;
}
if (mRendererFrameCallback != null) {
mRendererFrameCallback.onRendererFrame(mInputSurfaceTexture, mScaleX, mScaleY);
}
// Draw the video frame.
mInputSurfaceTexture.getTransformMatrix(mTransformMatrix);
if (isCropping()) {
// Scaling is easy. However:
// If the view is 10x1000 (very tall), it will show only the left strip of the preview (not the center one).
// If the view is 1000x10 (very large), it will show only the bottom strip of the preview (not the center one).
// We must use Matrix.translateM, and it must happen before the crop.
// So we must use Matrix.translateM, and it must happen before the crop.
float translX = (1F - mScaleX) / 2F;
float translY = (1F - mScaleY) / 2F;
Matrix.translateM(mTransformMatrix, 0, translX, translY, 0);
// Crop. Works, but without translation, it is not centered.
Matrix.scaleM(mTransformMatrix, 0, mScaleX, mScaleY, 1);
}
mOutputViewport.drawFrame(mOutputTextureId, mTransformMatrix);
for (RendererFrameCallback callback : mRendererFrameCallbacks) {
callback.onRendererFrame(mInputSurfaceTexture, mScaleX, mScaleY);
}
}
@Override
@ -207,9 +213,6 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
return true;
}
/* for tests */ float mScaleX = 1F;
/* for tests */ float mScaleY = 1F;
/**
* To crop in GL, we could actually use view.setScaleX and setScaleY, but only from Android N onward.
* See documentation: https://developer.android.com/reference/android/view/SurfaceView
@ -244,114 +247,39 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
mCropTask.end(null);
}
interface RendererFrameCallback {
// This does work but looks like a lot of stuff.
private void onSizeChangeImplementation1(final int width, final int height) {
releaseInputSurfaceTexture();
dispatchOnOutputSurfaceDestroyed();
getView().post(new Runnable() {
@Override
public void run() {
getView().onPause();
getView().onResume();
getView().queueEvent(new Runnable() {
@Override
public void run() {
createInputSurfaceTexture();
dispatchOnOutputSurfaceAvailable(width, height);
}
});
}
});
}
// This does not work. We get: startPreview failed.
private void onSizeChangeImplementation2(final int width, final int height) {
releaseInputSurfaceTexture();
getView().post(new Runnable() {
@Override
public void run() {
getView().onPause();
getView().onResume();
getView().queueEvent(new Runnable() {
@Override
public void run() {
createInputSurfaceTexture();
dispatchOnOutputSurfaceSizeChanged(width, height);
}
});
}
});
}
// Works! So we don't need to recreate the GL texture.
private void onSizeChangeImplementation3(final int width, final int height) {
dispatchOnOutputSurfaceDestroyed();
getView().post(new Runnable() {
@Override
public void run() {
getView().onPause();
getView().onResume();
getView().queueEvent(new Runnable() {
@Override
public void run() {
dispatchOnOutputSurfaceAvailable(width, height);
}
});
}
});
}
// Works! This is getting easy.
private void onSizeChangeImplementation4(final int width, final int height) {
dispatchOnOutputSurfaceDestroyed();
getView().post(new Runnable() {
@Override
public void run() {
getView().onPause();
getView().onResume();
dispatchOnOutputSurfaceAvailable(width, height);
}
});
}
/**
* Called on the renderer thread, hopefully only once, to notify that
* the texture was created (or to inform a new callback of the old texture).
*
* @param textureId the GL texture linked to the image stream
*/
void onRendererTextureCreated(int textureId);
// Does not work. onPause and onResume must be called on the UI thread.
// This make sense.
private void onSizeChangeImplementation5(final int width, final int height) {
dispatchOnOutputSurfaceDestroyed();
getView().onPause();
getView().onResume();
dispatchOnOutputSurfaceAvailable(width, height);
/**
* Called on the renderer thread after each frame was drawn.
* You are not supposed to hold for too long onto this thread, because
* well, it is the rendering thread.
*
* @param surfaceTexture the texture to get transformation
* @param scaleX the scaleX (in REF_VIEW) value
* @param scaleY the scaleY (in REF_VIEW) value
*/
void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY);
}
// Does NOT work. The EGL context must be recreated
// for this to work out.
private void onSizeChangeImplementation6(final int width, final int height) {
dispatchOnOutputSurfaceDestroyed();
getView().post(new Runnable() {
void addRendererFrameCallback(@NonNull final RendererFrameCallback callback) {
getView().queueEvent(new Runnable() {
@Override
public void run() {
getView().setPreserveEGLContextOnPause(true);
getView().onPause();
getView().onResume();
getView().setPreserveEGLContextOnPause(false);
dispatchOnOutputSurfaceAvailable(width, height);
mRendererFrameCallbacks.add(callback);
if (mOutputTextureId != 0) callback.onRendererTextureCreated(mOutputTextureId);
}
});
}
interface RendererFrameCallback {
// Renderer thread.
void onRendererTextureCreated(int textureId);
// Renderer thread.
void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY);
}
void setRendererFrameCallback(@Nullable RendererFrameCallback callback) {
mRendererFrameCallback = callback;
if (mRendererFrameCallback != null && mOutputTextureId != 0) {
mRendererFrameCallback.onRendererTextureCreated(mOutputTextureId);
}
void removeRendererFrameCallback(@NonNull final RendererFrameCallback callback) {
mRendererFrameCallbacks.remove(callback);
}
}

Loading…
Cancel
Save