I'm currently developing an Android app that detects objects through the camera using the TensorFlow Object Detection API. I want to take a screenshot of the camera activity while detection is running.
How do I take a screenshot of the running camera activity, including the camera preview itself?
This code is taken from the TensorFlow demo app. I just want to add a screenshot feature triggered by a button, which is why I added a Button and an ImageView to the layout. The ImageView is supposed to display a preview of the screenshot, but I have no idea how to implement this :(
This is the camera activity code:
CameraActivity.java
public abstract class CameraActivity extends Activity
implements OnImageAvailableListener, Camera.PreviewCallback {
private static final Logger LOGGER = new Logger();
private static final int PERMISSIONS_REQUEST = 1;
private static final String PERMISSION_CAMERA = Manifest.permission.CAMERA;
private static final String PERMISSION_STORAGE = Manifest.permission.WRITE_EXTERNAL_STORAGE;
private boolean debug = false;
private Handler handler;
private HandlerThread handlerThread;
private boolean useCamera2API;
private boolean isProcessingFrame = false;
private byte[][] yuvBytes = new byte[3][];
private int[] rgbBytes = null;
private int yRowStride;
protected int previewWidth = 0;
protected int previewHeight = 0;
private Runnable postInferenceCallback;
private Runnable imageConverter;
@Override
protected void onCreate(final Bundle savedInstanceState) {
LOGGER.d("onCreate " + this);
super.onCreate(null);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.activity_camera);
if (hasPermission()) {
setFragment();
} else {
requestPermission();
}
}
private byte[] lastPreviewFrame;
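// Runs the pending YUV -> ARGB conversion on demand and returns the latest frame's pixels.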
protected int[] getRgbBytes() {
imageConverter.run();
return rgbBytes;
}
protected int getLuminanceStride() {
return yRowStride;
}
protected byte[] getLuminance() {
return yuvBytes[0];
}
/**
* Callback for android.hardware.Camera API
*/
@Override
public void onPreviewFrame(final byte[] bytes, final Camera camera) {
if (isProcessingFrame) {
LOGGER.w("Dropping frame!");
return;
}
try {
// Initialize the storage bitmaps once when the resolution is known.
if (rgbBytes == null) {
Camera.Size previewSize = camera.getParameters().getPreviewSize();
previewHeight = previewSize.height;
previewWidth = previewSize.width;
rgbBytes = new int[previewWidth * previewHeight];
onPreviewSizeChosen(new Size(previewSize.width, previewSize.height), 90);
}
} catch (final Exception e) {
LOGGER.e(e, "Exception!");
return;
}
isProcessingFrame = true;
lastPreviewFrame = bytes;
yuvBytes[0] = bytes;
yRowStride = previewWidth;
imageConverter =
new Runnable() {
@Override
public void run() {
ImageUtils.convertYUV420SPToARGB8888(bytes, previewWidth, previewHeight, rgbBytes);
}
};
postInferenceCallback =
new Runnable() {
@Override
public void run() {
camera.addCallbackBuffer(bytes);
isProcessingFrame = false;
}
};
processImage();
}
/**
* Callback for Camera2 API
*/
@Override
public void onImageAvailable(final ImageReader reader) {
// We need to wait until we have some size from onPreviewSizeChosen.
if (previewWidth == 0 || previewHeight == 0) {
return;
}
if (rgbBytes == null) {
rgbBytes = new int[previewWidth * previewHeight];
}
try {
final Image image = reader.acquireLatestImage();
if (image == null) {
return;
}
if (isProcessingFrame) {
image.close();
return;
}
isProcessingFrame = true;
Trace.beginSection("imageAvailable");
final Plane[] planes = image.getPlanes();
fillBytes(planes, yuvBytes);
yRowStride = planes[0].getRowStride();
final int uvRowStride = planes[1].getRowStride();
final int uvPixelStride = planes[1].getPixelStride();
imageConverter =
new Runnable() {
@Override
public void run() {
ImageUtils.convertYUV420ToARGB8888(
yuvBytes[0],
yuvBytes[1],
yuvBytes[2],
previewWidth,
previewHeight,
yRowStride,
uvRowStride,
uvPixelStride,
rgbBytes);
}
};
postInferenceCallback =
new Runnable() {
@Override
public void run() {
image.close();
isProcessingFrame = false;
}
};
processImage();
} catch (final Exception e) {
LOGGER.e(e, "Exception!");
Trace.endSection();
return;
}
Trace.endSection();
}
@Override
public synchronized void onStart() {
LOGGER.d("onStart " + this);
super.onStart();
}
@Override
public synchronized void onResume() {
LOGGER.d("onResume " + this);
super.onResume();
handlerThread = new HandlerThread("inference");
handlerThread.start();
handler = new Handler(handlerThread.getLooper());
}
@Override
public synchronized void onPause() {
LOGGER.d("onPause " + this);
if (!isFinishing()) {
LOGGER.d("Requesting finish");
finish();
}
handlerThread.quitSafely();
try {
handlerThread.join();
handlerThread = null;
handler = null;
} catch (final InterruptedException e) {
LOGGER.e(e, "Exception!");
}
super.onPause();
}
@Override
public synchronized void onStop() {
LOGGER.d("onStop " + this);
super.onStop();
}
@Override
public synchronized void onDestroy() {
LOGGER.d("onDestroy " + this);
super.onDestroy();
}
protected synchronized void runInBackground(final Runnable r) {
if (handler != null) {
handler.post(r);
}
}
@Override
public void onRequestPermissionsResult(
final int requestCode, final String[] permissions, final int[] grantResults) {
if (requestCode == PERMISSIONS_REQUEST) {
// Guard against an empty or cancelled result before indexing both entries.
if (grantResults.length >= 2
&& grantResults[0] == PackageManager.PERMISSION_GRANTED
&& grantResults[1] == PackageManager.PERMISSION_GRANTED) {
setFragment();
} else {
requestPermission();
}
}
}
private boolean hasPermission() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
return checkSelfPermission(PERMISSION_CAMERA) == PackageManager.PERMISSION_GRANTED &&
checkSelfPermission(PERMISSION_STORAGE) == PackageManager.PERMISSION_GRANTED;
} else {
return true;
}
}
private void requestPermission() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
if (shouldShowRequestPermissionRationale(PERMISSION_CAMERA) ||
shouldShowRequestPermissionRationale(PERMISSION_STORAGE)) {
Toast.makeText(CameraActivity.this,
"Camera AND storage permission are required for this demo", Toast.LENGTH_LONG).show();
}
requestPermissions(new String[] {PERMISSION_CAMERA, PERMISSION_STORAGE}, PERMISSIONS_REQUEST);
}
}
// Returns true if the device supports the required hardware level, or better.
private boolean isHardwareLevelSupported(
CameraCharacteristics characteristics, int requiredLevel) {
int deviceLevel = characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL);
if (deviceLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
return requiredLevel == deviceLevel;
}
// deviceLevel is not LEGACY, can use numerical sort
return requiredLevel <= deviceLevel;
}
private String chooseCamera() {
final CameraManager manager = (CameraManager) getSystemService(Context.CAMERA_SERVICE);
try {
for (final String cameraId : manager.getCameraIdList()) {
final CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
// We don't use a front facing camera in this sample.
final Integer facing = characteristics.get(CameraCharacteristics.LENS_FACING);
if (facing != null && facing == CameraCharacteristics.LENS_FACING_FRONT) {
continue;
}
final StreamConfigurationMap map =
characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
if (map == null) {
continue;
}
// Fallback to camera1 API for internal cameras that don't have full support.
// This should help with legacy situations where using the camera2 API causes
// distorted or otherwise broken previews.
useCamera2API = (facing != null && facing == CameraCharacteristics.LENS_FACING_EXTERNAL)
|| isHardwareLevelSupported(characteristics,
CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_FULL);
LOGGER.i("Camera API lv2?: %s", useCamera2API);
return cameraId;
}
} catch (CameraAccessException e) {
LOGGER.e(e, "Not allowed to access camera");
}
return null;
}
protected void setFragment() {
String cameraId = chooseCamera();
Fragment fragment;
if (useCamera2API) {
CameraConnectionFragment camera2Fragment =
CameraConnectionFragment.newInstance(
new CameraConnectionFragment.ConnectionCallback() {
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
previewHeight = size.getHeight();
previewWidth = size.getWidth();
CameraActivity.this.onPreviewSizeChosen(size, rotation);
}
},
this,
getLayoutId(),
getDesiredPreviewFrameSize());
camera2Fragment.setCamera(cameraId);
fragment = camera2Fragment;
} else {
fragment =
new LegacyCameraConnectionFragment(this, getLayoutId(), getDesiredPreviewFrameSize());
}
getFragmentManager()
.beginTransaction()
.replace(R.id.container, fragment)
.commit();
}
protected void fillBytes(final Plane[] planes, final byte[][] yuvBytes) {
// Because of the variable row stride it's not possible to know in
// advance the actual necessary dimensions of the yuv planes.
for (int i = 0; i < planes.length; ++i) {
final ByteBuffer buffer = planes[i].getBuffer();
if (yuvBytes[i] == null) {
LOGGER.d("Initializing buffer %d at size %d", i, buffer.capacity());
yuvBytes[i] = new byte[buffer.capacity()];
}
buffer.get(yuvBytes[i]);
}
}
public boolean isDebug() {
return debug;
}
public void requestRender() {
final OverlayView overlay = (OverlayView) findViewById(R.id.debug_overlay);
if (overlay != null) {
overlay.postInvalidate();
}
}
public void addCallback(final OverlayView.DrawCallback callback) {
final OverlayView overlay = (OverlayView) findViewById(R.id.debug_overlay);
if (overlay != null) {
overlay.addCallback(callback);
}
}
public void onSetDebug(final boolean debug) {}
@Override
public boolean onKeyDown(final int keyCode, final KeyEvent event) {
if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN || keyCode == KeyEvent.KEYCODE_VOLUME_UP) {
debug = !debug;
requestRender();
onSetDebug(debug);
return true;
}
return super.onKeyDown(keyCode, event);
}
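// Releases the current frame (returns the camera1 buffer or closes the camera2 Image)
// so the next frame can be processed.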
protected void readyForNextImage() {
if (postInferenceCallback != null) {
postInferenceCallback.run();
}
}
protected int getScreenOrientation() {
switch (getWindowManager().getDefaultDisplay().getRotation()) {
case Surface.ROTATION_270:
return 270;
case Surface.ROTATION_180:
return 180;
case Surface.ROTATION_90:
return 90;
default:
return 0;
}
}
protected abstract void processImage();
protected abstract void onPreviewSizeChosen(final Size size, final int rotation);
protected abstract int getLayoutId();
protected abstract Size getDesiredPreviewFrameSize();
}
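The closest idea I have so far (an untested sketch of my own, not part of the demo) is to reuse the frame that CameraActivity already decodes: getRgbBytes() returns the latest preview frame as ARGB pixels, so a subclass could wrap it in a Bitmap when the button from the layout below is clicked:
// Untested sketch for a CameraActivity subclass, e.g. at the end of onCreate().
// Assumes at least one preview frame has already been converted, and uses the
// R.id.button / R.id.imageView ids from the layout below.
final Button screenshotButton = (Button) findViewById(R.id.button);
final ImageView screenshotView = (ImageView) findViewById(R.id.imageView);
screenshotButton.setOnClickListener(new View.OnClickListener() {
  @Override
  public void onClick(final View v) {
    if (previewWidth == 0 || previewHeight == 0) {
      return; // no frame has been received yet
    }
    // getRgbBytes() runs the pending YUV -> ARGB conversion for the last frame.
    final Bitmap frame = Bitmap.createBitmap(
        getRgbBytes(), previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
    screenshotView.setImageBitmap(frame);
  }
});
As far as I understand, though, this would capture only the raw camera frame and not the detection boxes, since those are drawn on the separate OverlayView.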
And this is the layout file:
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:orientation="vertical"
xmlns:android="http://schemas.android.com/apk/res/android">
<FrameLayout
android:id="@+id/container"
android:layout_width="match_parent"
android:layout_height="500dp"
tools:context="org.tensorflow.demo.CameraActivity">
<Button
android:id="@+id/button"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Screenshot" />
</FrameLayout>
<ImageView
android:id="@+id/imageView"
android:layout_width="140dp"
android:layout_height="160dp"
android:layout_marginStart="205dp"
android:layout_marginTop="0dp" />
I just want to take a screenshot of the activity, including the camera view where the detection happens. I have tried several tutorials on taking screenshots: they capture every part of the screen except the camera area, which comes out as a plain black rectangle. HELP!
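While searching, I learned that the black area is apparently because the preview is rendered into a SurfaceView, whose pixels bypass the normal View drawing pass, so drawing-cache screenshots never contain them. One workaround I found suggested (an unverified sketch, needs API 26+) is PixelCopy, which copies the composited window contents instead:
// Unverified sketch, requires API 26+ (android.view.PixelCopy with a Window
// source). Copies the composited window, so SurfaceView pixels are included.
private void captureWindow() {
  final View decor = getWindow().getDecorView();
  final Bitmap bitmap = Bitmap.createBitmap(
      decor.getWidth(), decor.getHeight(), Bitmap.Config.ARGB_8888);
  PixelCopy.request(
      getWindow(),
      bitmap,
      new PixelCopy.OnPixelCopyFinishedListener() {
        @Override
        public void onPixelCopyFinished(final int copyResult) {
          if (copyResult == PixelCopy.SUCCESS) {
            ((ImageView) findViewById(R.id.imageView)).setImageBitmap(bitmap);
          }
        }
      },
      new Handler(Looper.getMainLooper()));
}
If the app ends up on the camera2 path, the preview seems to be a TextureView; in that case textureView.getBitmap() might also work, since TextureView content (unlike SurfaceView) can be read back directly. I haven't been able to verify either approach yet, so any pointers are appreciated.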