离线语音识别android采取不需要的声音
Posted
技术标签:
【中文标题】离线语音识别:Android 拾取了不需要的声音【英文标题】:Offline voice recognition android taking unwanted voice 【发布时间】:2015-09-04 04:26:32 【问题描述】:我做了很多研究并尝试了离线的 PocketSphinx,但它会拾取周围环境的声音,导致我的应用做出错误的反应。是否有任何适用于 KitKat 4.4 的 Google 离线语音方案?我已经尝试了两周。感谢您的宝贵回答。
详细信息:当 Activity 启动时,必须先用文本转语音(TTS)朗读文本内容;朗读完成后,语音识别必须开始接收语音并按命令执行,例如:(next、previous、forward、option、1、2、3、4 等)。识别到命令后,必须在 onResult 方法中对其做出反应。
错误:我听到一些声音后出现错误
06-18 19:54:00.159: V/onBeginningOfSpeech(3360): onBeginningOfSpeech
06-18 19:54:01.024: V/onPartialResult(3360): option
06-18 19:54:01.109: I/cmusphinx(3360): INFO: fsg_search.c(843): 105 frames, 5333 HMMs (50/fr), 7748 senones (73/fr), 371 history entries (3/fr)
06-18 19:54:01.110: I/SpeechRecognizer(3360): Stop recognition
06-18 19:54:01.110: E/cmusphinx(3360): ERROR: "fsg_search.c", line 913: Final result does not match the grammar in frame 105
06-18 19:54:01.111: V/onPartialResult-->(3360): option
06-18 19:54:01.111: V/onResult(3360): onResult
android Hive offline link
但如果我关闭互联网,它就无法离线工作。
在 PocketSphinx 中,它对正确的单词没有反应。如果我说“next”,它会把附近其他人说话的内容也拾取进来并匹配到别的关键词上,这给我带来了很多问题。是否有任何可以离线使用的解决方案或库?有没有任何 Google 的离线语音支持?
下面是我尝试过的代码
package com.example.sample1;
import static edu.cmu.pocketsphinx.SpeechRecognizerSetup.defaultSetup;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Locale;
import edu.cmu.pocketsphinx.RecognitionListener;
import android.app.Activity;
import android.graphics.Color;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.Handler;
import android.speech.tts.TextToSpeech;
import android.speech.tts.TextToSpeech.OnUtteranceCompletedListener;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.ExpandableListView;
import android.widget.ExpandableListView.OnChildClickListener;
import android.widget.ExpandableListView.OnGroupClickListener;
import android.widget.ExpandableListView.OnGroupExpandListener;
import android.widget.TextView;
import android.widget.Toast;
import edu.cmu.pocketsphinx.Assets;
import edu.cmu.pocketsphinx.Hypothesis;
import edu.cmu.pocketsphinx.SpeechRecognizer;
public class Sam extends Activity implements RecognitionListener, TextToSpeech.OnInitListener
/* Named searches allow to quickly reconfigure the decoder */
private static final String DIGITS_SEARCH = "digits";
private SpeechRecognizer recognizer;
private HashMap<String, Integer> captions;
private TextView caption_text;
private TextView result_text;
ArrayList<String> result1;
private Button buttonLeft;
private Button buttonRight;
int count = 0;
private ArrayList<DataAnswer> dummyListTemp;
private ArrayList<DataAnswer> dummyList;
AnswerDataAdapter listAdapter = null;
int conteo = 0;
Handler a = new Handler();
private TextToSpeech tts;
String readIt ="";
HashMap<String, String> params = new HashMap<String, String>();
@Override
public void onCreate(Bundle state)
super.onCreate(state);
Log.v("onCreate", "onCreate");
// Prepare the data for UI
captions = new HashMap<String, Integer>();
captions.put(DIGITS_SEARCH, R.string.digits_caption);
setContentView(R.layout.quiz);
caption_text = (TextView) findViewById(R.id.caption_text);
result_text = (TextView) findViewById(R.id.result_text);
// listViewAnswer = (ExpandableListView) findViewById(R.id.listViewAnswer);
buttonRight = (Button) findViewById(R.id.buttonRight);
buttonLeft = (Button) findViewById(R.id.buttonLeft);
result_text.setText("Result --->: ");
tts = new TextToSpeech(this, this);
params.put(TextToSpeech.Engine.KEY_PARAM_UTTERANCE_ID,"stringId");
String text = "World is full of chanllenge";
//Speakes the text first and then after comple reading text voice recoginzation must start
speakOut(text);
buttonRight.setOnClickListener(new OnClickListener()
@Override
public void onClick(View v)
try
//onClickRight();
Toast.makeText(getApplicationContext(), "Right", Toast.LENGTH_SHORT).show();
catch (Exception e)
e.printStackTrace();
);
buttonLeft.setOnClickListener(new OnClickListener()
@Override
public void onClick(View v)
//onClickLeft();
Toast.makeText(getApplicationContext(), "Left", Toast.LENGTH_SHORT).show();
);
// Recognizer initialization is a time-consuming and it involves IO,
// so we execute it in async task
//if(!tts.isSpeaking())
new AsyncTask<Void, Void, Exception>()
@Override
protected Exception doInBackground(Void... params)
try
Assets assets = new Assets(Sam.this);
File assetDir = assets.syncAssets();
Log.v("AsyncTask", "AsyncTask");
setupRecognizer(assetDir);
catch (IOException e)
return e;
return null;
@Override
protected void onPostExecute(Exception result)
Log.v("onPostExecute", "onPostExecute");
try
if (result != null)
//caption_text.setText("Failed to init recognizer " + result);
Toast.makeText(getApplicationContext(), "Failed to init recognizer ", Toast.LENGTH_SHORT).show();
else
FireRecognition();
//switchSearch(DIGITS_SEARCH);
catch (Exception e)
e.printStackTrace();
.execute();
@Override
public void onDestroy()
super.onDestroy();
Log.v("onDestroy", "onDestroy");
recognizer.cancel();
recognizer.shutdown();
if (tts != null)
tts.stop();
tts.shutdown();
public void FireRecognition()
Log.d("Recognition","Recognition Started");
//caption_text.setText("Recognition Started!");
//Toast.makeText(getApplicationContext(), "Recognition Started!", Toast.LENGTH_SHORT).show();
recognizer.stop();
//recognizer.startListening("digits");
/**
* In partial result we get quick updates about current hypothesis. In
* keyword spotting mode we can react here, in other modes we need to wait
* for final result in onResult.
*/
@Override
public void onPartialResult(Hypothesis hypothesis)
try
//Log.v("onPartialResult", "onPartialResult");
if (hypothesis == null)
return;
Log.v("onPartialResult", hypothesis.getHypstr().toString());
String text = hypothesis.getHypstr();
if(recognizer !=null)
recognizer.stop();
caption_text.setText("Partial result -->: " + text);
Log.v("onPartialResult-->", text);
// Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
catch (Exception e)
e.printStackTrace();
/**
* This callback is called when we stop the recognizer.
*/
@Override
public void onResult(Hypothesis hypothesis)
try
Log.v("onResult", "onResult");
// result_text.setText("");
if (hypothesis != null)
String text = hypothesis.getHypstr();
//Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
// ((TextView) findViewById(R.id.result_text)).setText(text);
if(text.toLowerCase().equals("next"))
result_text.setText("Result --->: " + text);
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
else if(text.toLowerCase().equals("previous"))
result_text.setText("Result --->: " + text);
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
else if(text.toLowerCase().trim().equals("option one".toLowerCase().trim()))
result_text.setText("Result --->: " + text);
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
result_text.setText("Result --->: " + text);
else if(text.toLowerCase().trim().equals("option two".toLowerCase().toString()))
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
else if(text.toLowerCase().trim().equals("option three".toLowerCase().toString()))
result_text.setText("Result --->: " + text);
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
else if(text.toLowerCase().trim().equals("option four".toLowerCase().toString()))
result_text.setText("Result --->: " + text);
Toast.makeText(getApplicationContext(), text, Toast.LENGTH_SHORT).show();
else
Toast.makeText(getApplicationContext(), " No Access:--" + text, Toast.LENGTH_SHORT).show();
Log.v("onResult-->", text);
if(recognizer != null)
recognizer.startListening("digits");
catch (Exception e)
e.printStackTrace();
@Override
public void onBeginningOfSpeech()
Log.v("onBeginningOfSpeech", "onBeginningOfSpeech");
/**
* We stop recognizer here to get a final result
*/
@Override
public void onEndOfSpeech()
Log.v("onEndOfSpeech", "onEndOfSpeech");
if (!recognizer.getSearchName().equals(DIGITS_SEARCH))
switchSearch(DIGITS_SEARCH);
private void switchSearch(String searchName)
Log.v("switchSearch", "switchSearch--->" + searchName);
recognizer.stop();
// If we are not spotting, start listening with timeout (10000 ms or 10 seconds).
if (searchName.equals(DIGITS_SEARCH))
recognizer.startListening(searchName, 10000);
/* else
recognizer.startListening(searchName, 10000);*/
/* String caption = getResources().getString(captions.get(searchName));
caption_text.setText(caption);*/
private void setupRecognizer(File assetsDir) throws IOException
// The recognizer can be configured to perform multiple searches
// of different kind and switch between them
Log.v("setupRecognizer", "setupRecognizer");
recognizer = defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
// To disable logging of raw audio comment out this call (takes a lot of space on the device)
.setRawLogDir(assetsDir)
// Threshold to tune for keyphrase to balance between false alarms and misses
.setKeywordThreshold(1e-20f) //1e-20f 1e-45f
// Use context-independent phonetic search, context-dependent is too slow for mobile
// .setBoolean("-allphone_ci", true)
.getRecognizer();
recognizer.addListener(this);
/** In your application you might not need to add all those searches.
* They are added here for demonstration. You can leave just one.
*/
// Create keyword-activation search.
// recognizer.addKeyphraseSearch(KWS_SEARCH, KEYPHRASE);
// Create grammar-based search for digit recognition
File digitsGrammar = new File(assetsDir, "digits.gram");
recognizer.addGrammarSearch(DIGITS_SEARCH, digitsGrammar);
@Override
public void onError(Exception error)
Log.v("onError", "onError");
//caption_text.setText(error.getMessage());
Toast.makeText(getApplicationContext(), error.getMessage(), Toast.LENGTH_SHORT).show();
@Override
public void onTimeout()
Log.v("onTimeout", "onTimeout");
switchSearch(DIGITS_SEARCH);
@SuppressWarnings("deprecation")
@Override
public void onInit(int status)
tts.setOnUtteranceCompletedListener(new OnUtteranceCompletedListener()
@Override
public void onUtteranceCompleted(String utteranceId)
runOnUiThread(new Runnable()
@Override
public void run()
if(recognizer != null)
recognizer.startListening("digits");
//Toast.makeText(getApplicationContext(), "Completed", Toast.LENGTH_LONG).show();
);
);
if (status == TextToSpeech.SUCCESS)
int result = tts.setLanguage(Locale.US);
Log.i("Success", "Completed");
if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED)
Log.e("TTS", "This Language is not supported");
else
//buttonSpeak.setEnabled(true);
// speakOut();
String text = " No Voice Found".toString();
Log.i("else", "else");
// speakOut(text);
else
Log.e("TTS", "Initilization Failed!");
@SuppressWarnings("deprecation")
private void speakOut(String text)
if(tts.isSpeaking())
//recognizer.stop();
// String text = ((TextView) findViewById(R.id.caption_text)).getText().toString();
tts.speak(text, TextToSpeech.QUEUE_FLUSH, params);
Log.i("Speaking-->", "****" + tts.isSpeaking());
在 digit.gram 中
#JSGF V1.0;
// Grammar for the "digits" search: matches one or more of the listed
// command phrases (option one..four, back, previous, next).
grammar digits;
<digit> = option one |
option two |
option three |
option four |
back |
previous |
next;
// One or more commands per utterance.
public <digits> = <digit>+;
以上是我的代码,如果有人处理过,请告诉我解决方案 这是我的 xml quiz.xml
<?xml version="1.0" encoding="utf-8"?>
<!-- NOTE(review): the scraped page truncated every android:layout_width /
     android:layout_height value; the values below are reconstructed
     defaults — verify against the original project.
     Also note: linearLayout2 anchors below "@+id/linearLayout1", which is
     not declared anywhere in this file — confirm it exists in the real
     layout or remove the constraint. -->
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical" >

    <LinearLayout
        android:id="@+id/linearLayout2"
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:layout_alignParentLeft="true"
        android:layout_below="@+id/linearLayout1"
        android:orientation="vertical" >

        <ExpandableListView
            android:id="@+id/listViewAnswer"
            android:layout_width="match_parent"
            android:layout_height="wrap_content"
            android:focusable="false"
            android:focusableInTouchMode="false"
            android:childDivider="#334455" >
        </ExpandableListView>
    </LinearLayout>

    <RelativeLayout
        android:id="@+id/relativeLayout1"
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:layout_alignParentLeft="true"
        android:layout_below="@+id/linearLayout2"
        android:layout_marginTop="10dip"
        android:orientation="horizontal" >

        <Button
            android:id="@+id/buttonRight"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_alignParentRight="true"
            android:layout_marginRight="10dip"
            android:text="Next" />

        <Button
            android:id="@+id/buttonLeft"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_marginLeft="10dip"
            android:text="Previous" />
    </RelativeLayout>

    <RelativeLayout
        android:id="@+id/relativeLayout2"
        android:layout_width="match_parent"
        android:layout_height="wrap_content"
        android:layout_alignParentLeft="true"
        android:layout_below="@+id/relativeLayout1" >

        <TextView
            android:id="@+id/result_text"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_alignParentLeft="true"
            android:textAppearance="?android:attr/textAppearanceMedium"
            android:textColor="#556677" />

        <TextView
            android:id="@+id/caption_text"
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_below="@+id/result_text"
            android:layout_alignParentLeft="true"
            android:textColor="#443399" />
    </RelativeLayout>
</RelativeLayout>
更新:
如果你正在使用 addKeywordSearch
File digitsGrammar = new File(context.getFilesDir(), "digits.gram");
recognizer.addKeywordSearch(DIGITS_SEARCH, digitsGrammar);
然后在 digit.gram 中只写
option one /1e-1/
option two /1e-1/
option three /1e-1/
option four /1e-1/
back /1e-1/
previous /1e-1/
next /1e-1/
或
如果您使用的是 addGrammarSearch
File digitsGrammar = new File(context.getFilesDir(), "digits.gram");
recognizer.addGrammarSearch(DIGITS_SEARCH, digitsGrammar);
然后在 digit.gram 中
#JSGF V1.0;
// Grammar for the "digits" search: matches one or more of the listed
// command phrases (option one..four, back, previous, next).
grammar digits;
<digit> = option one |
option two |
option three |
option four |
back |
previous |
next;
// One or more commands per utterance.
public <digits> = <digit>+;
【问题讨论】:
【参考方案1】:你需要使用关键词定位模式而不是语法模式来持续聆听
您可以在此处找到示例:
Recognizing multiple keywords using PocketSphinx
【讨论】:
怎么做?我可以得到任何相关的示例吗? 是不是要把 recognizer.addGrammarSearch(DIGITS_SEARCH, digitsGrammar); 换成 recognizer.addKeywordSearch(DIGITS_SEARCH, digitsGrammar);,对吗?然后我需要在 digit.gram 中修改什么? 上面更新过的 digit.gram 就是我为关键字搜索需要改成的写法。 您可以在这里找到示例:***.com/questions/25748113/… 我已经尝试过该示例,但它在 digit.gram 中报 JSGF 错误。在 digit.gram 中这样写是否正确:#JSGF V1.0; grammar digits; &lt;digit&gt; = option one /1.0/ | option two /1.0/ | option three /1.0/ | option four /1.0/ | back /1.0/ | previous /1.0/ | next; public &lt;digits&gt; = &lt;digit&gt;+;【参考方案2】:从 Android M 开始,您可以在创建识别器意图时使用 EXTRA_PREFER_OFFLINE。
【讨论】:
以上是关于离线语音识别android采取不需要的声音的主要内容,如果未能解决你的问题,请参考以下文章