eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

TtsService.java 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344
  1. /*
  2. * Copyright (C) 2012-2015 Reece H. Dunn
  3. * Copyright (C) 2011 Google Inc.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. */
  17. /*
  18. * This file implements the Android Text-to-Speech engine for eSpeak.
  19. *
  20. * Android Version: 4.0 (Ice Cream Sandwich)
  21. * API Version: 14
  22. */
  23. package com.reecedunn.espeak;
  24. import android.annotation.SuppressLint;
  25. import android.content.BroadcastReceiver;
  26. import android.content.Context;
  27. import android.content.Intent;
  28. import android.content.IntentFilter;
  29. import android.media.AudioTrack;
  30. import android.os.Build;
  31. import android.os.Bundle;
  32. import android.preference.PreferenceManager;
  33. import android.speech.tts.SynthesisCallback;
  34. import android.speech.tts.SynthesisRequest;
  35. import android.speech.tts.TextToSpeech;
  36. import android.speech.tts.TextToSpeechService;
  37. import android.util.Log;
  38. import android.util.Pair;
  39. import com.reecedunn.espeak.SpeechSynthesis.SynthReadyCallback;
  40. import java.util.ArrayList;
  41. import java.util.HashMap;
  42. import java.util.HashSet;
  43. import java.util.List;
  44. import java.util.Locale;
  45. import java.util.Map;
  46. import java.util.Set;
  47. /**
  48. * Implements the eSpeak engine as a {@link TextToSpeechService}.
  49. *
  50. * @author [email protected] (Reece H. Dunn)
  51. * @author [email protected] (Alan Viverette)
  52. */
  53. @SuppressLint("NewApi")
  54. public class TtsService extends TextToSpeechService {
  55. public static final String ESPEAK_INITIALIZED = "com.reecedunn.espeak.ESPEAK_INITIALIZED";
  56. private static final String TAG = TtsService.class.getSimpleName();
  57. private static final boolean DEBUG = false;
  58. private SpeechSynthesis mEngine;
  59. private SynthesisCallback mCallback;
  60. private final Map<String, Voice> mAvailableVoices = new HashMap<String, Voice>();
  61. protected Voice mMatchingVoice = null;
  62. private BroadcastReceiver mOnLanguagesDownloaded = null;
  63. @Override
  64. public void onCreate() {
  65. initializeTtsEngine();
  66. super.onCreate();
  67. }
  68. @Override
  69. public void onDestroy() {
  70. super.onDestroy();
  71. if (mOnLanguagesDownloaded != null) {
  72. unregisterReceiver(mOnLanguagesDownloaded);
  73. }
  74. }
  75. /**
  76. * Sets up the native eSpeak engine.
  77. */
  78. private void initializeTtsEngine() {
  79. if (mEngine != null) {
  80. mEngine.stop();
  81. mEngine = null;
  82. }
  83. mEngine = new SpeechSynthesis(this, mSynthCallback);
  84. mAvailableVoices.clear();
  85. for (Voice voice : mEngine.getAvailableVoices()) {
  86. mAvailableVoices.put(voice.name, voice);
  87. }
  88. final Intent intent = new Intent(ESPEAK_INITIALIZED);
  89. sendBroadcast(intent);
  90. }
  91. @Override
  92. protected String[] onGetLanguage() {
  93. // This is used to specify the language requested from GetSampleText.
  94. if (mMatchingVoice == null) {
  95. return new String[] { "eng", "GBR", "" };
  96. }
  97. return new String[] {
  98. mMatchingVoice.locale.getISO3Language(),
  99. mMatchingVoice.locale.getISO3Country(),
  100. mMatchingVoice.locale.getVariant()
  101. };
  102. }
  103. private Pair<Voice, Integer> findVoice(String language, String country, String variant) {
  104. if (!CheckVoiceData.hasBaseResources(this) || CheckVoiceData.canUpgradeResources(this)) {
  105. if (mOnLanguagesDownloaded == null) {
  106. mOnLanguagesDownloaded = new BroadcastReceiver() {
  107. @Override
  108. public void onReceive(Context context, Intent intent) {
  109. initializeTtsEngine();
  110. }
  111. };
  112. final IntentFilter filter = new IntentFilter(DownloadVoiceData.BROADCAST_LANGUAGES_UPDATED);
  113. registerReceiver(mOnLanguagesDownloaded, filter);
  114. }
  115. final Intent intent = new Intent(this, DownloadVoiceData.class);
  116. intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
  117. startActivity(intent);
  118. return new Pair<>(null, TextToSpeech.LANG_MISSING_DATA);
  119. }
  120. final Locale query = new Locale(language, country, variant);
  121. Voice languageVoice = null;
  122. Voice countryVoice = null;
  123. synchronized (mAvailableVoices) {
  124. for (Voice voice : mAvailableVoices.values()) {
  125. switch (voice.match(query)) {
  126. case TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE:
  127. return new Pair<>(voice, TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE);
  128. case TextToSpeech.LANG_COUNTRY_AVAILABLE:
  129. countryVoice = voice;
  130. case TextToSpeech.LANG_AVAILABLE:
  131. languageVoice = voice;
  132. break;
  133. }
  134. }
  135. }
  136. if (languageVoice == null) {
  137. return new Pair<>(null, TextToSpeech.LANG_NOT_SUPPORTED);
  138. } else if (countryVoice == null) {
  139. return new Pair<>(languageVoice, TextToSpeech.LANG_AVAILABLE);
  140. } else {
  141. return new Pair<>(countryVoice, TextToSpeech.LANG_COUNTRY_AVAILABLE);
  142. }
  143. }
  144. private Pair<Voice, Integer> getDefaultVoiceFor(String language, String country, String variant) {
  145. final Pair<Voice, Integer> match = findVoice(language, country, variant);
  146. switch (match.second) {
  147. case TextToSpeech.LANG_AVAILABLE:
  148. if (language.equals("fr") || language.equals("fra")) {
  149. return new Pair<>(findVoice(language, "FRA", "").first, match.second);
  150. }
  151. if (language.equals("pt") || language.equals("por")) {
  152. return new Pair<>(findVoice(language, "PRT", "").first, match.second);
  153. }
  154. return new Pair<>(findVoice(language, "", "").first, match.second);
  155. case TextToSpeech.LANG_COUNTRY_AVAILABLE:
  156. if ((language.equals("vi") || language.equals("vie")) && (country.equals("VN") || country.equals("VNM"))) {
  157. return new Pair<>(findVoice(language, country, "hue").first, match.second);
  158. }
  159. return new Pair<>(findVoice(language, country, "").first, match.second);
  160. default:
  161. return match;
  162. }
  163. }
  164. @Override
  165. protected int onIsLanguageAvailable(String language, String country, String variant) {
  166. return findVoice(language, country, variant).second;
  167. }
  168. @Override
  169. protected int onLoadLanguage(String language, String country, String variant) {
  170. final Pair<Voice, Integer> match = getDefaultVoiceFor(language, country, variant);
  171. if (match.first != null) {
  172. mMatchingVoice = match.first;
  173. }
  174. return match.second;
  175. }
  176. @Override
  177. protected Set<String> onGetFeaturesForLanguage(String lang, String country, String variant) {
  178. return new HashSet<String>();
  179. }
  180. @Override
  181. public String onGetDefaultVoiceNameFor(String language, String country, String variant) {
  182. final Voice match = getDefaultVoiceFor(language, country, variant).first;
  183. return (match == null) ? null : match.name;
  184. }
  185. @Override
  186. public List<android.speech.tts.Voice> onGetVoices() {
  187. List<android.speech.tts.Voice> voices = new ArrayList<android.speech.tts.Voice>();
  188. for (Voice voice : mAvailableVoices.values()) {
  189. int quality = android.speech.tts.Voice.QUALITY_NORMAL;
  190. int latency = android.speech.tts.Voice.LATENCY_VERY_LOW;
  191. Locale locale = new Locale(voice.locale.getISO3Language(), voice.locale.getISO3Country(), voice.locale.getVariant());
  192. Set<String> features = onGetFeaturesForLanguage(locale.getLanguage(), locale.getCountry(), locale.getVariant());
  193. voices.add(new android.speech.tts.Voice(voice.name, voice.locale, quality, latency, false, features));
  194. }
  195. return voices;
  196. }
  197. @Override
  198. public int onIsValidVoiceName(String name) {
  199. Voice voice = mAvailableVoices.get(name);
  200. return (voice == null) ? TextToSpeech.ERROR : TextToSpeech.SUCCESS;
  201. }
  202. @Override
  203. public int onLoadVoice(String name) {
  204. Voice voice = mAvailableVoices.get(name);
  205. if (voice == null) {
  206. return TextToSpeech.ERROR;
  207. }
  208. mMatchingVoice = voice;
  209. return TextToSpeech.SUCCESS;
  210. }
  211. @Override
  212. protected void onStop() {
  213. Log.i(TAG, "Received stop request.");
  214. mEngine.stop();
  215. }
  216. @SuppressWarnings("deprecation")
  217. private String getRequestString(SynthesisRequest request) {
  218. if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
  219. return request.getCharSequenceText().toString();
  220. } else {
  221. return request.getText();
  222. }
  223. }
  224. private int selectVoice(SynthesisRequest request) {
  225. if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
  226. final String name = request.getVoiceName();
  227. if (name != null && !name.isEmpty()) {
  228. return onLoadVoice(name);
  229. }
  230. }
  231. final int result = onLoadLanguage(request.getLanguage(), request.getCountry(), request.getVariant());
  232. switch (result) {
  233. case TextToSpeech.LANG_MISSING_DATA:
  234. case TextToSpeech.LANG_NOT_SUPPORTED:
  235. return TextToSpeech.ERROR;
  236. }
  237. return TextToSpeech.SUCCESS;
  238. }
  239. @Override
  240. protected synchronized void onSynthesizeText(SynthesisRequest request, SynthesisCallback callback) {
  241. if (mMatchingVoice == null)
  242. return;
  243. String text = getRequestString(request);
  244. if (text == null)
  245. return;
  246. if (DEBUG) {
  247. Log.i(TAG, "Received synthesis request: {language=\"" + mMatchingVoice.name + "\"}");
  248. final Bundle params = request.getParams();
  249. for (String key : params.keySet()) {
  250. Log.v(TAG,
  251. "Synthesis request contained param {" + key + ", " + params.get(key) + "}");
  252. }
  253. }
  254. if (text.startsWith("<?xml"))
  255. {
  256. // eSpeak does not recognise/skip "<?...?>" preprocessing tags,
  257. // so need to remove these before passing to synthesize.
  258. text = text.substring(text.indexOf("?>") + 2).trim();
  259. }
  260. mCallback = callback;
  261. mCallback.start(mEngine.getSampleRate(), mEngine.getAudioFormat(), mEngine.getChannelCount());
  262. final VoiceSettings settings = new VoiceSettings(PreferenceManager.getDefaultSharedPreferences(this), mEngine);
  263. mEngine.setVoice(mMatchingVoice, settings.getVoiceVariant());
  264. mEngine.Rate.setValue(settings.getRate(), request.getSpeechRate());
  265. mEngine.Pitch.setValue(settings.getPitch(), request.getPitch());
  266. mEngine.PitchRange.setValue(settings.getPitchRange());
  267. mEngine.Volume.setValue(settings.getVolume());
  268. mEngine.Punctuation.setValue(settings.getPunctuationLevel());
  269. mEngine.setPunctuationCharacters(settings.getPunctuationCharacters());
  270. mEngine.synthesize(text, text.startsWith("<speak"));
  271. }
  272. /**
  273. * Pipes synthesizer output from native eSpeak to an {@link AudioTrack}.
  274. */
  275. private final SpeechSynthesis.SynthReadyCallback mSynthCallback = new SynthReadyCallback() {
  276. @Override
  277. public void onSynthDataReady(byte[] audioData) {
  278. if ((audioData == null) || (audioData.length == 0)) {
  279. onSynthDataComplete();
  280. return;
  281. }
  282. final int maxBytesToCopy = mCallback.getMaxBufferSize();
  283. int offset = 0;
  284. while (offset < audioData.length) {
  285. final int bytesToWrite = Math.min(maxBytesToCopy, (audioData.length - offset));
  286. mCallback.audioAvailable(audioData, offset, bytesToWrite);
  287. offset += bytesToWrite;
  288. }
  289. }
  290. @Override
  291. public void onSynthDataComplete() {
  292. mCallback.done();
  293. }
  294. };
  295. }