在实时人脸检测 Android 中的 Live CameraPreview 上围绕人脸绘制矩形
Posted
技术标签:
【中文标题】在实时人脸检测 Android 中的 Live CameraPreview 上围绕人脸绘制矩形【英文标题】:Draw Rectangle Around Face on Live CameraPreview in Real Time Face Detection Android 【发布时间】:2021-09-15 00:57:47 【问题描述】:我正在创建一个打开手机相机的应用程序。我正在使用 FaceDetector (Google ML Kit) 进行实时人脸检测。我想在实时相机预览中显示检测到的面部周围的矩形。 我无法找到完美的答案。谁能帮帮我吗。提前致谢。 我附上代码供参考。
这是我的 activity_main.xml 文件
<!-- Root layout: scrape truncated every layout_width/layout_height attribute
     (left as "android:layout_") and typo'd the closing tag as </LinerLayout>;
     both restored here. -->
<LinearLayout
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical"
    xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools">

    <!-- FrameLayout container so a face-rectangle overlay View can later be
         stacked on top of the camera preview. -->
    <FrameLayout
        android:id="@+id/previewView_container"
        android:layout_width="match_parent"
        android:layout_height="match_parent">

        <androidx.camera.view.PreviewView
            android:id="@+id/previewView"
            android:layout_width="match_parent"
            android:layout_height="match_parent" />
    </FrameLayout>
</LinearLayout>
下面是我的 MainActivity.java 文件。
public class MainActivity extends AppCompatActivity
FaceDetector detector;
private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
PreviewView previewView;
CameraSelector cameraSelector;
boolean start = true,flipX=false;
int cam_face=CameraSelector.LENS_FACING_FRONT;
ProcessCameraProvider cameraProvider;
@Override
protected void onCreate(Bundle savedInstanceState)
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
FaceDetectorOptions highAccuracyOpts =
new FaceDetectorOptions.Builder()
.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
.setContourMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
.build();
detector = FaceDetection.getClient(highAccuracyOpts);
cameraBind();
private void cameraBind()
cameraProviderFuture = ProcessCameraProvider.getInstance(this);
previewView=findViewById(R.id.previewView);
cameraProviderFuture.addListener(() ->
try
cameraProvider = cameraProviderFuture.get();
bindPreview(cameraProvider);
catch (ExecutionException | InterruptedException e)
// No errors need to be handled for this in Future.
// This should never be reached.
, ContextCompat.getMainExecutor(this));
void bindPreview(@NonNull ProcessCameraProvider cameraProvider)
Preview preview = new Preview.Builder()
.build();
cameraSelector = new CameraSelector.Builder()
.requireLensFacing(cam_face)
.build();
preview.setSurfaceProvider(previewView.getSurfaceProvider());
ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
.setTargetResolution(new Size(640, 480))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build();
Executor executor = Executors.newSingleThreadExecutor();
imageAnalysis.setAnalyzer(executor, new ImageAnalysis.Analyzer()
@Override
public void analyze(@NonNull ImageProxy imageProxy)
InputImage image = null;
@SuppressLint("UnsafeExperimentalUsageError")
Image mediaImage = imageProxy.getImage();
if (mediaImage != null)
image = InputImage.fromMediaImage(mediaImage, imageProxy.getImageInfo().getRotationDegrees());
if (image != null)
detector.process(image)
.addOnSuccessListener(
new OnSuccessListener<List<Face>>()
@Override
public void onSuccess(List<Face> faces)
if(faces.size()!= 0)
Face face = faces.get(0);
Bitmap frame_bmp = toBitmap(mediaImage);
int rot = imageProxy.getImageInfo().getRotationDegrees();
Bitmap frame_bmp1 = rotateBitmap(frame_bmp, rot, flipX, false);
RectF boundingBox = new RectF(face.getBoundingBox());
Bitmap croppedFace = getCropBitmapByCPU(frame_bmp1, boundingBox);
Bitmap scaled = getResizedBitmap(croppedFace, 112, 112);
// will pass this scaled bitmap to model
// Canvas canvas = new Canvas();
// Paint paint = new Paint();
// paint.setColor(Color.GREEN);
// paint.setStyle(Paint.Style.STROKE);
// paint.setStrokeWidth(3);
// canvas.drawRect(boundingBox, paint);
try
Thread.sleep(100);
catch (InterruptedException e)
e.printStackTrace();
)
.addOnFailureListener(
new OnFailureListener()
@Override
public void onFailure(@NonNull Exception e)
)
.addOnCompleteListener(new OnCompleteListener<List<Face>>()
@Override
public void onComplete(@NonNull Task<List<Face>> task)
imageProxy.close();
);
);
cameraProvider.bindToLifecycle((LifecycleOwner) this, cameraSelector, imageAnalysis, preview);
private Bitmap toBitmap(Image image)
byte[] nv21=YUV_420_888toNV21(image);
YuvImage yuvImage = new YuvImage(nv21, ImageFormat.NV21, image.getWidth(), image.getHeight(), null);
ByteArrayOutputStream out = new ByteArrayOutputStream();
yuvImage.compressToJpeg(new Rect(0, 0, yuvImage.getWidth(), yuvImage.getHeight()), 100, out);
byte[] imageBytes = out.toByteArray();
return BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.length);
private static byte[] YUV_420_888toNV21(Image image)
int width = image.getWidth();
int height = image.getHeight();
int ySize = width*height;
int uvSize = width*height/4;
byte[] nv21 = new byte[ySize + uvSize*2];
ByteBuffer yBuffer = image.getPlanes()[0].getBuffer();
ByteBuffer uBuffer = image.getPlanes()[1].getBuffer();
ByteBuffer vBuffer = image.getPlanes()[2].getBuffer();
int rowStride = image.getPlanes()[0].getRowStride();
assert(image.getPlanes()[0].getPixelStride() == 1);
int pos = 0;
if (rowStride == width)
yBuffer.get(nv21, 0, ySize);
pos += ySize;
else
long yBufferPos = -rowStride;
for (; pos<ySize; pos+=width)
yBufferPos += rowStride;
yBuffer.position((int) yBufferPos);
yBuffer.get(nv21, pos, width);
rowStride = image.getPlanes()[2].getRowStride();
int pixelStride = image.getPlanes()[2].getPixelStride();
assert(rowStride == image.getPlanes()[1].getRowStride());
assert(pixelStride == image.getPlanes()[1].getPixelStride());
if (pixelStride == 2 && rowStride == width && uBuffer.get(0) == vBuffer.get(1))
byte savePixel = vBuffer.get(1);
try
vBuffer.put(1, (byte)~savePixel);
if (uBuffer.get(0) == (byte)~savePixel)
vBuffer.put(1, savePixel);
vBuffer.position(0);
uBuffer.position(0);
vBuffer.get(nv21, ySize, 1);
uBuffer.get(nv21, ySize + 1, uBuffer.remaining());
return nv21;
catch (ReadOnlyBufferException ex)
// unfortunately, we cannot check if vBuffer and uBuffer overlap
vBuffer.put(1, savePixel);
for (int row=0; row<height/2; row++)
for (int col=0; col<width/2; col++)
int vuPos = col*pixelStride + row*rowStride;
nv21[pos++] = vBuffer.get(vuPos);
nv21[pos++] = uBuffer.get(vuPos);
return nv21;
private static Bitmap rotateBitmap(Bitmap bitmap, int rotationDegrees, boolean flipX, boolean flipY)
Matrix matrix = new Matrix();
matrix.postRotate(rotationDegrees);
matrix.postScale(flipX ? -1.0f : 1.0f, flipY ? -1.0f : 1.0f);
Bitmap rotatedBitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
if (rotatedBitmap != bitmap)
bitmap.recycle();
return rotatedBitmap;
public Bitmap getResizedBitmap(Bitmap bm, int newWidth, int newHeight)
int width = bm.getWidth();
int height = bm.getHeight();
float scaleWidth = ((float) newWidth) / width;
float scaleHeight = ((float) newHeight) / height;
Matrix matrix = new Matrix();
matrix.postScale(scaleWidth, scaleHeight);
Bitmap resizedBitmap = Bitmap.createBitmap(bm, 0, 0, width, height, matrix, false);
bm.recycle();
return resizedBitmap;
private static Bitmap getCropBitmapByCPU(Bitmap source, RectF cropRectF)
Bitmap resultBitmap = Bitmap.createBitmap((int) cropRectF.width(), (int) cropRectF.height(), Bitmap.Config.ARGB_8888);
Canvas cavas = new Canvas(resultBitmap);
Paint paint = new Paint(Paint.FILTER_BITMAP_FLAG);
paint.setColor(Color.WHITE);
cavas.drawRect(new RectF(0, 0, cropRectF.width(), cropRectF.height()), paint);
Matrix matrix = new Matrix();
matrix.postTranslate(-cropRectF.left, -cropRectF.top);
cavas.drawBitmap(source, matrix, paint);
if (source != null && !source.isRecycled())
source.recycle();
return resultBitmap;
【问题讨论】:
【参考方案1】:从检测到的人脸获取边界框后,您可以在检测到的人脸周围绘制一个矩形。请参阅ML Kit vision sample app,了解如何将此类 UI 应用于检测到的人脸。
【讨论】:
以上是关于在实时人脸检测 Android 中的 Live CameraPreview 上围绕人脸绘制矩形的主要内容,如果未能解决你的问题,请参考以下文章
使用“android-vision”库保存实时检测到的人脸(跟踪人脸)图像
Android App人脸识别中借助摄像头和OpenCV实时检测人脸讲解及实战(附源码和演示 超详细)