diff --git a/cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java b/cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java
index 2b6cb0b4..1b87cb07 100644
--- a/cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java
+++ b/cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java
@@ -697,7 +697,7 @@ class Camera1 extends CameraController implements Camera.PreviewCallback, Camera
         // So it looks like REF_VIEW REF_OUTPUT is the correct one, meaning that
         // the input data in this case is not in the REF_SENSOR coordinates but rather
         // in the REF_VIEW ones.
-        videoResult.size = finalSize;
+        videoResult.size = flip(REF_VIEW, REF_OUTPUT) ? finalSize.flip() : finalSize;
         videoResult.rotation = offset(REF_VIEW, REF_OUTPUT);
         videoResult.audio = mAudio;
         videoResult.maxSize = mVideoMaxSize;
diff --git a/cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java b/cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java
index e1d44fb6..8f25dda6 100644
--- a/cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java
+++ b/cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java
@@ -64,9 +64,7 @@ class SnapshotVideoRecorder extends VideoRecorder implements GLCameraPreview.Ren
     @Override
     public void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY) {
         if (mCurrentState == STATE_NOT_RECORDING && mDesiredState == STATE_RECORDING) {
-            // Size must be flipped based on rotation, because we will rotate the texture in the encoder
-            Size size = mResult.getRotation() % 180 == 0 ? mResult.getSize() : mResult.getSize().flip();
-            // size = mResult.size;
+            Size size = mResult.getSize();
             // Ensure width and height are divisible by 2, as I have read somewhere.
             int width = size.getWidth();
             int height = size.getHeight();
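
The net effect of the two hunks is that the width/height swap now happens once in `Camera1`, based on the view-to-output reference offset, instead of later in `SnapshotVideoRecorder` based on the result rotation. The sketch below is a minimal, self-contained illustration of that relationship, not the library's actual implementation: the `Size` class and the 90/270-degree check are simplified stand-ins for CameraView's own `Size`, `flip(REF_VIEW, REF_OUTPUT)` and `offset(REF_VIEW, REF_OUTPUT)` helpers, and the `viewToOutputOffset` value is hypothetical.

```java
// Minimal sketch of the size/offset relationship the Camera1 change relies on
// (simplified stand-ins for the library's Size and reference-offset helpers).
public class SizeFlipSketch {

    // Simplified immutable size, mirroring Size.flip() used in the diff.
    static final class Size {
        final int width;
        final int height;
        Size(int width, int height) { this.width = width; this.height = height; }
        Size flip() { return new Size(height, width); }
        @Override public String toString() { return width + "x" + height; }
    }

    // A 90- or 270-degree offset between two references means width and height swap.
    static boolean flip(int offsetDegrees) {
        return offsetDegrees % 180 != 0;
    }

    public static void main(String[] args) {
        Size finalSize = new Size(1920, 1080);
        int viewToOutputOffset = 90; // hypothetical value of offset(REF_VIEW, REF_OUTPUT)

        // Same shape as the Camera1 change: flip the size up front,
        // so the recorder can simply use mResult.getSize() as-is.
        Size videoSize = flip(viewToOutputOffset) ? finalSize.flip() : finalSize;
        System.out.println("video size: " + videoSize); // prints 1080x1920
    }
}
```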