OpenCV Android:如何找到头部的运动

Posted

技术标签:

【中文标题】OpenCV Android:如何找到头部的运动【英文标题】:OpenCV4Android: How to find the movements of head 【发布时间】:2014-09-09 06:38:27 【问题描述】:

我正在使用 Viola-Jones 方法检测人脸,当人脸倾斜时,算法可能无法正常工作。

我想检测那个动作,当它无法检测到面部时。

我可以使用运动检测来实现这一点,还是有其他方法可以找到运动。

提前致谢,

【问题讨论】:

【参考方案1】:

是的,您可以通过捕获和比较它们来做到这一点。

这篇教程（原文此处附有链接）将帮助您检测到它们，之后您就可以比较 x 和 y 的位置了。

class DetectNose extends JPanel implements KeyListener, ActionListener 
private static final long serialVersionUID = 1L;
private static JFrame frame;
private BufferedImage image;
private CascadeClassifier face_cascade;
private Point center;
private JLabel label;
private Image scalledItemImage;
private double customY = 0;
private double customX = 0;
private Iterator<InputStream> iterator;
private ArrayList<BufferedImage> listOfCachedImages;
private int imageIndex = 1;
private int customZ = 0;
private Size size;
private Image scalledItemImageBackup;
private Point center1;
private int imgSize = 35;
private boolean isLocked;


// Builds the panel: loads the native OpenCV library for the current OS,
// caches all ornament images from the database, shows the first ornament,
// and loads the Haar frontal-face cascade.
// NOTE(review): braces restored — they were stripped in the pasted snippet.
public DetectNose(JFrame frame, List<Long> listOfOrnaments) {
    super();
    // NOTE(review): assigning a static field through `this` — verify whether
    // multiple instances are ever created; if so this clobbers the frame.
    this.frame = frame;
    this.frame.setFocusable(true);
    this.frame.requestFocusInWindow();
    this.frame.addKeyListener(this);

    File f = null;
    try {
        System.out.println(System.getProperty("os.name"));
        if (System.getProperty("os.name").contains("Windows")) {
            f = new File("res/opencv_lib_win/opencv_java249.dll");
            System.load(f.getAbsolutePath());
            System.out.println("Loaded :" + f.getAbsolutePath());
        } else {
            f = new File("res/opencv_lib/libopencv_java246.so");
            System.load(f.getAbsolutePath());
            System.out.println("Loaded :" + f.getAbsolutePath());
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }

    List<InputStream> ornaments = DatabaseHandler
            .getOrnamentsImagesByListOfOrnaments(listOfOrnaments);

    iterator = ornaments.iterator();

    listOfCachedImages = new ArrayList<BufferedImage>();

    try {
        // Decode every ornament stream up front so switching is instant.
        while (iterator.hasNext()) {
            InputStream inputStream = iterator.next();
            listOfCachedImages.add(ImageIO.read(inputStream));
        }

        setFirstOrnament();

    } catch (IOException e) {
        e.printStackTrace();
    }
    label = new JLabel(new ImageIcon(scalledItemImage));
    add(label);
    face_cascade = new CascadeClassifier(
            "res/cascades/haarcascade_frontalface_alt_tree.xml");
    if (face_cascade.empty()) {
        System.out.println("--(!)Error loading A\n");
        return;
    } else {
        System.out.println("Face classifier loaded up");
    }
}

private void setFirstOrnament() 
    scalledItemImage = listOfCachedImages.get(imageIndex - 1);
    scalledItemImageBackup = scalledItemImage.getScaledInstance(700, 700,
            BufferedImage.TYPE_INT_RGB);
    scalledItemImage = scalledItemImage.getScaledInstance(imgSize, imgSize,
            BufferedImage.TYPE_INT_RGB);
    repaint();
    System.out.println("imageIndex = " + imageIndex);


private void setPrevOrnament() 
    if (imageIndex > 1) 
        imageIndex--;
        scalledItemImage = listOfCachedImages.get(imageIndex - 1);
        scalledItemImageBackup = scalledItemImage.getScaledInstance(700,
                700, BufferedImage.TYPE_INT_RGB);
        scalledItemImage = scalledItemImage.getScaledInstance(imgSize,
                imgSize, BufferedImage.TYPE_INT_RGB);
        GoLiveIntermediator.nextButton.setEnabled(true);
        repaint();
        revalidate();
        System.out.println("imageIndex = " + imageIndex);
     else 
        GoLiveIntermediator.prevButton.setEnabled(false);
    


private void setNextOrnament() 
    if (listOfCachedImages.size() > imageIndex) 
        imageIndex++;
        scalledItemImage = listOfCachedImages.get(imageIndex - 1);
        scalledItemImageBackup = scalledItemImage.getScaledInstance(700,
                700, BufferedImage.TYPE_INT_RGB);
        scalledItemImage = scalledItemImage.getScaledInstance(imgSize,
                imgSize, BufferedImage.TYPE_INT_RGB);
        GoLiveIntermediator.prevButton.setEnabled(true);
        repaint();
        revalidate();
        System.out.println("imageIndex = " + imageIndex);
     else 
        GoLiveIntermediator.nextButton.setEnabled(false);
    


private BufferedImage getimage() 
    return image;


public void setimage(BufferedImage newimage) 
    image = newimage;
    return;


public BufferedImage matToBufferedImage(Mat matrix) 
    int cols = matrix.cols();
    int rows = matrix.rows();
    int elemSize = (int) matrix.elemSize();
    byte[] data = new byte[cols * rows * elemSize];
    int type;
    matrix.get(0, 0, data);
    switch (matrix.channels()) 
    case 1:
        type = BufferedImage.TYPE_BYTE_GRAY;
        break;
    case 3:
        type = BufferedImage.TYPE_3BYTE_BGR;
        // bgr to rgb
        byte b;
        for (int i = 0; i < data.length; i = i + 3) 
            b = data[i];
            data[i] = data[i + 2];
            data[i + 2] = b;
        
        break;
    default:
        return null;
    
    BufferedImage image2 = new BufferedImage(cols, rows, type);
    image2.getRaster().setDataElements(0, 0, cols, rows, data);
    return image2;


public void paintComponent(Graphics g) 
    try 
        this.frame.requestFocusInWindow();
        BufferedImage temp = getimage();
        g.drawImage(temp, 0, 0, temp.getWidth(), temp.getHeight() + 50,
                this);
     catch (Exception ex) 
        System.out.print("Trying to load images...");
    


// Runs the Haar cascade on one frame: converts to grayscale, equalizes,
// detects faces, and draws a magenta ellipse on each. Returns the annotated
// colour frame. `center` keeps the last detected face centre, which callers
// can compare across frames to measure head movement.
public Mat detect(Mat inputframe) {
    Mat mRgba = new Mat();
    Mat mGrey = new Mat();
    MatOfRect faces = new MatOfRect();
    inputframe.copyTo(mRgba);
    // FIX: the original also did inputframe.copyTo(mGrey) here, which was
    // redundant — cvtColor (re)allocates its destination anyway.
    Imgproc.cvtColor(mRgba, mGrey, Imgproc.COLOR_BGR2GRAY);
    Imgproc.equalizeHist(mGrey, mGrey);
    try {
        face_cascade.detectMultiScale(mGrey, faces);
    } catch (Exception e) {
        System.out.print(".");
    }
    // NOTE(review): reconfiguring the window on every frame is wasteful;
    // consider moving these two calls to startup.
    frame.setLocationRelativeTo(null);
    frame.setResizable(false);

    for (Rect rect : faces.toArray()) {
        // Face centre — usable as first/last detection point for tracking.
        center = new Point(rect.x + rect.width * 0.5,
                rect.y + rect.height * 0.5);
        size = new Size(rect.width * 0.5, rect.height * 0.5);
        Core.ellipse(mRgba, center, size, 0, 0, 360,
                new Scalar(255, 0, 255), 1, 8, 0);

        repaint();
    }
    return mRgba;
}
} // end class DetectNose (closing brace restored — stripped in the paste)
这里的center是第一个检测点,你可以从图像中找到最后一个检测点。

【讨论】:

您发送的教程链接是用于 Java 中的人脸检测的。请把用于比较 x 和 y 位置的链接发给我。
有什么方法可以检测 OpenCV 中从一帧到另一帧的变化？例如，如果一个人移动导致人脸检测失败，我想检测从初始状态（检测到人脸）到未检测到人脸的变化。
您可以跟踪检测并保存上一次的检测结果，然后测量两个点之间的位移。
如果这解决了您的问题，您可以采纳我的回答并投票。
请给我一个示例代码或链接。

以上是关于OpenCV Android:如何找到头部的运动的主要内容,如果未能解决你的问题,请参考以下文章

OpenCV:如何找到运动信息的质心/质心

如何使用适用于 Android 的 OpenCV 减少实时视频序列中的运动效果?

如何跟踪运动物体的轨迹openCV C++

OpenCV BackgroundSubtractor Android 运动检测器错误

Python-OpenCV 中头部姿势的仰角如何工作?

opencv 怎么识别出红色物体呀