eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
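The file below implements the library's top-level C API. As a quick orientation, here is a minimal sketch of how a client might drive the public functions defined in it. The function names and signatures are taken from the source below; the spoken text, the synchronous speak-audio output mode, and the zero buffer length (which selects the default 60 ms buffer) are illustrative choices, not requirements.

    #include <espeak-ng/espeak_ng.h>
    #include <espeak-ng/speak_lib.h>
    #include <string.h>

    int main(void)
    {
        // Locate the espeak-ng-data directory (NULL falls back to the
        // ESPEAK_DATA_PATH/HOME environment variables or the compiled-in default).
        espeak_ng_InitializePath(NULL);

        espeak_ng_ERROR_CONTEXT context = NULL;
        if (espeak_ng_Initialize(&context) != ENS_OK)
            return 1;

        // Speak through the default audio device, synchronously.
        if (espeak_ng_InitializeOutput(ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS,
                                       0, NULL) != ENS_OK)
            return 1;

        const char *text = "Hello from eSpeak NG.";
        espeak_ng_Synthesize(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                             espeakCHARS_AUTO, NULL, NULL);

        espeak_ng_Synchronize(); // returns any error recorded during playback
        espeak_ng_Terminate();
        return 0;
    }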

speech.c 24KB

/*
 * Copyright (C) 2005 to 2013 by Jonathan Duddington
 * email: [email protected]
 * Copyright (C) 2013-2016 Reece H. Dunn
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see: <http://www.gnu.org/licenses/>.
 */

#include "config.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <locale.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#include <wchar.h>

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
#include <pcaudiolib/audio.h>
#endif

#if defined(_WIN32) || defined(_WIN64)
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#include <winreg.h>
#endif

#include <espeak-ng/espeak_ng.h>
#include <espeak-ng/speak_lib.h>

#include "speech.h"
#include "phoneme.h"
#include "synthesize.h"
#include "voice.h"
#include "translate.h"
#include "espeak_command.h"
#include "fifo.h"
#include "event.h"

unsigned char *outbuf = NULL;
espeak_EVENT *event_list = NULL;
int event_list_ix = 0;
int n_event_list;
long count_samples;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
struct audio_object *my_audio = NULL;
#endif

static const char *option_device = NULL;
static unsigned int my_unique_identifier = 0;
static void *my_user_data = NULL;
static espeak_ng_OUTPUT_MODE my_mode = ENOUTPUT_MODE_SYNCHRONOUS;
static int out_samplerate = 0;
static int voice_samplerate = 22050;
static espeak_ng_STATUS err = ENS_OK;

t_espeak_callback *synth_callback = NULL;
int (*uri_callback)(int, const char *, const char *) = NULL;
int (*phoneme_callback)(const char *) = NULL;

char path_home[N_PATH_HOME]; // this is the espeak-ng-data directory

extern int saved_parameters[N_SPEECH_PARAM]; // Parameters saved on synthesis start

static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
{
    int a_wave_can_be_played = 1;
#ifdef USE_ASYNC
    if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
        a_wave_can_be_played = fifo_is_command_enabled();
#endif

    switch ((int)my_mode)
    {
    case ENOUTPUT_MODE_SPEAK_AUDIO:
    case ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS:
    {
        int event_type = 0;
        if (event)
            event_type = event->type;

        if (event_type == espeakEVENT_SAMPLERATE) {
            voice_samplerate = event->id.number;

            if (out_samplerate != voice_samplerate) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                if (out_samplerate != 0) {
                    // sound was previously open with a different sample rate
                    audio_object_close(my_audio);
#ifdef HAVE_SLEEP
                    sleep(1);
#endif
                }
#endif
                out_samplerate = voice_samplerate;
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
                if (error != 0) {
                    fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
                    err = ENS_AUDIO_ERROR;
                    return -1;
                }
#endif
#ifdef USE_ASYNC
                if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
                    event_init();
#endif
            }
        }

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        if (outbuf && length && a_wave_can_be_played) {
            int error = audio_object_write(my_audio, (char *)outbuf, 2*length);
            if (error != 0)
                fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
        }
#endif

#ifdef USE_ASYNC
        while (event && a_wave_can_be_played) {
            // TBD: some events are filtered here, but synthesize.c might be
            // TBD: adjusted to avoid creating WORD events with size=0.
            // TBD: For example, the sentence "or ALT)." returns three words:
            // "or", "ALT" and "".
            // TBD: the last one has size=0.
            if ((event->type == espeakEVENT_WORD) && (event->length == 0))
                break;
            if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0) {
                err = event_declare(event);
                if (err != ENS_EVENT_BUFFER_FULL)
                    break;
                usleep(10000);
                a_wave_can_be_played = fifo_is_command_enabled();
            } else
                break;
        }
#endif
    }
        break;
    case 0:
        if (synth_callback)
            synth_callback(outbuf, length, event);
        break;
    }

    return a_wave_can_be_played == 0; // 1 = stop synthesis, -1 = error
}

static int create_events(short *outbuf, int length, espeak_EVENT *event_list)
{
    int finished;
    int i = 0;

    // The audio data are written to the output device.
    // The list of events in event_list (index: event_list_ix) is read:
    // Each event is declared to the "event" object which stores them internally.
    // The event object is responsible for calling the external callback
    // as soon as the relevant audio sample is played.

    do { // for each event
        espeak_EVENT *event;
        if (event_list_ix == 0)
            event = NULL;
        else
            event = event_list + i;
        finished = dispatch_audio((short *)outbuf, length, event);
        length = 0; // the wave data are played once.
        i++;
    } while ((i < event_list_ix) && !finished);
    return finished;
}

#ifdef USE_ASYNC
int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
{
    int finished = 0;

    memset(event_list, 0, 2*sizeof(espeak_EVENT));

    event_list[0].type = espeakEVENT_MSG_TERMINATED;
    event_list[0].unique_identifier = unique_identifier;
    event_list[0].user_data = user_data;
    event_list[1].type = espeakEVENT_LIST_TERMINATED;
    event_list[1].unique_identifier = unique_identifier;
    event_list[1].user_data = user_data;

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
        while (1) {
            err = event_declare(event_list);
            if (err != ENS_EVENT_BUFFER_FULL)
                break;
            usleep(10000);
        }
    } else if (synth_callback)
        finished = synth_callback(NULL, 0, event_list);
    return finished;
}
#endif

static int check_data_path(const char *path, int allow_directory)
{
    if (!path) return 0;

    snprintf(path_home, sizeof(path_home), "%s/espeak-ng-data", path);
    if (GetFileLength(path_home) == -2)
        return 1;

    if (!allow_directory)
        return 0;

    snprintf(path_home, sizeof(path_home), "%s", path);
    return GetFileLength(path_home) == -2;
}

#pragma GCC visibility push(default)

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_InitializeOutput(espeak_ng_OUTPUT_MODE output_mode, int buffer_length, const char *device)
{
    option_device = device;
    my_mode = output_mode;
    out_samplerate = 0;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    my_audio = create_audio_device_object(device, "eSpeak", "Text-to-Speech");
#endif

    // buffer_length is in ms, allocate 2 bytes per sample
    if (buffer_length == 0)
        buffer_length = 60;

    outbuf_size = (buffer_length * samplerate)/500;
    out_start = (unsigned char *)realloc(outbuf, outbuf_size);
    if (out_start == NULL)
        return ENOMEM;
    else
        outbuf = out_start;

    // allocate space for event list. Allow 200 events per second.
    // Add a constant to allow for very small buffer_length.
    n_event_list = (buffer_length*200)/1000 + 20;
    espeak_EVENT *new_event_list = (espeak_EVENT *)realloc(event_list, sizeof(espeak_EVENT) * n_event_list);
    if (new_event_list == NULL)
        return ENOMEM;
    event_list = new_event_list;

    return ENS_OK;
}
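/* Worked example of the sizing above: with the default buffer_length of 60 ms
 * at samplerate = 22050 Hz,
 *   outbuf_size  = (60 * 22050) / 500     = 2646 bytes (1323 16-bit samples)
 *   n_event_list = (60 * 200) / 1000 + 20 = 32 event slots
 */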
int GetFileLength(const char *filename)
{
    struct stat statbuf;

    if (stat(filename, &statbuf) != 0)
        return 0;

    if (S_ISDIR(statbuf.st_mode))
        return -2; // a directory

    return statbuf.st_size;
}

ESPEAK_NG_API void espeak_ng_InitializePath(const char *path)
{
    if (check_data_path(path, 1))
        return;

#ifdef PLATFORM_WINDOWS
    HKEY RegKey;
    unsigned long size;
    unsigned long var_type;
    unsigned char buf[sizeof(path_home)-13];

    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    buf[0] = 0;
    RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\eSpeak NG", 0, KEY_READ, &RegKey);
    if (RegKey == NULL)
        RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\WOW6432Node\\eSpeak NG", 0, KEY_READ, &RegKey);
    size = sizeof(buf);
    var_type = REG_SZ;
    RegQueryValueExA(RegKey, "Path", 0, &var_type, buf, &size);

    if (check_data_path(buf, 1))
        return;
#elif !defined(PLATFORM_DOS)
    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    if (check_data_path(getenv("HOME"), 0))
        return;
#endif

    strcpy(path_home, PATH_ESPEAK_DATA);
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Initialize(espeak_ng_ERROR_CONTEXT *context)
{
    int param;
    int srate = 22050; // default sample rate 22050 Hz

    // It seems that the wctype functions don't work until the locale has been set
    // to something other than the default "C". Then, not only Latin1 but also the
    // other characters give the correct results with iswalpha() etc.
    if (setlocale(LC_CTYPE, "C.UTF-8") == NULL) {
        if (setlocale(LC_CTYPE, "UTF-8") == NULL) {
            if (setlocale(LC_CTYPE, "en_US.UTF-8") == NULL)
                setlocale(LC_CTYPE, "");
        }
    }

    espeak_ng_STATUS result = LoadPhData(&srate, context);
    if (result != ENS_OK)
        return result;

    WavegenInit(srate, 0);
    LoadConfig();

    memset(&current_voice_selected, 0, sizeof(current_voice_selected));
    SetVoiceStack(NULL, "");
    SynthesizeInit();
    InitNamedata();

    VoiceReset(0);

    for (param = 0; param < N_SPEECH_PARAM; param++)
        param_stack[0].parameter[param] = saved_parameters[param] = param_defaults[param];

    SetParameter(espeakRATE, 175, 0);
    SetParameter(espeakVOLUME, 100, 0);
    SetParameter(espeakCAPITALS, option_capitals, 0);
    SetParameter(espeakPUNCTUATION, option_punctuation, 0);
    SetParameter(espeakWORDGAP, 0, 0);

#ifdef USE_ASYNC
    fifo_init();
#endif

    option_phonemes = 0;
    option_phoneme_events = 0;

    return ENS_OK;
}

ESPEAK_NG_API int espeak_ng_GetSampleRate(void)
{
    return samplerate;
}

#pragma GCC visibility pop

static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
    // Fill the buffer with output sound
    int length;
    int finished = 0;
    int count_buffers = 0;

    if ((outbuf == NULL) || (event_list == NULL))
        return ENS_NOT_INITIALIZED;

    option_multibyte = flags & 7;
    option_ssml = flags & espeakSSML;
    option_phoneme_input = flags & espeakPHONEMES;
    option_endpause = flags & espeakENDPAUSE;
    count_samples = 0;

    if (translator == NULL)
        espeak_SetVoiceByName("default");

    SpeakNextClause(NULL, text, 0);

    for (;;) {
        out_ptr = outbuf;
        out_end = &outbuf[outbuf_size];
        event_list_ix = 0;
        WavegenFill();

        length = (out_ptr - outbuf)/2;
        count_samples += length;
        event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
        event_list[event_list_ix].unique_identifier = unique_identifier;
        event_list[event_list_ix].user_data = my_user_data;

        count_buffers++;
        if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
            finished = create_events((short *)outbuf, length, event_list);
            if (finished < 0)
                return ENS_AUDIO_ERROR;
        } else if (synth_callback)
            finished = synth_callback((short *)outbuf, length, event_list);
        if (finished) {
            SpeakNextClause(NULL, 0, 2); // stop
            return ENS_SPEECH_STOPPED;
        }

        if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
            if (WcmdqUsed() == 0) {
                // don't process the next clause until the previous clause has finished generating speech.
                // This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary
                event_list[0].type = espeakEVENT_LIST_TERMINATED;
                event_list[0].unique_identifier = my_unique_identifier;
                event_list[0].user_data = my_user_data;

                if (SpeakNextClause(NULL, NULL, 1) == 0) {
                    finished = 0;
                    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
                        if (dispatch_audio(NULL, 0, NULL) < 0)
                            return ENS_AUDIO_ERROR;
                    } else if (synth_callback)
                        finished = synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
                    if (finished) {
                        SpeakNextClause(NULL, 0, 2); // stop
                        return ENS_SPEECH_STOPPED;
                    }
                    return ENS_OK;
                }
            }
        }
    }
}

void MarkerEvent(int type, unsigned int char_position, int value, int value2, unsigned char *out_ptr)
{
    // type: 1=word, 2=sentence, 3=named mark, 4=play audio, 5=end, 7=phoneme
    espeak_EVENT *ep;
    double time;

    if ((event_list == NULL) || (event_list_ix >= (n_event_list-2)))
        return;

    ep = &event_list[event_list_ix++];
    ep->type = (espeak_EVENT_TYPE)type;
    ep->unique_identifier = my_unique_identifier;
    ep->user_data = my_user_data;
    ep->text_position = char_position & 0xffffff;
    ep->length = char_position >> 24;

    time = ((double)(count_samples + mbrola_delay + (out_ptr - out_start)/2)*1000.0)/samplerate;
    ep->audio_position = (int)time;
    ep->sample = (count_samples + mbrola_delay + (out_ptr - out_start)/2);

    if ((type == espeakEVENT_MARK) || (type == espeakEVENT_PLAY))
        ep->id.name = &namedata[value];
    else if (type == espeakEVENT_PHONEME) {
        int *p;
        p = (int *)(ep->id.string);
        p[0] = value;
        p[1] = value2;
    } else
        ep->id.number = value;
}
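/* Example of the char_position packing consumed above: the low 24 bits carry
 * the character offset and the high 8 bits the item length, so a 5-character
 * word at text offset 120 arrives as char_position = (5 << 24) | 120, giving
 * ep->text_position = 120 and ep->length = 5.
 */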
espeak_ng_STATUS sync_espeak_Synth(unsigned int unique_identifier, const void *text,
                                   unsigned int position, espeak_POSITION_TYPE position_type,
                                   unsigned int end_position, unsigned int flags, void *user_data)
{
    InitText(flags);
    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        saved_parameters[i] = param_stack[0].parameter[i];

    switch (position_type)
    {
    case POS_CHARACTER:
        skip_characters = position;
        break;
    case POS_WORD:
        skip_words = position;
        break;
    case POS_SENTENCE:
        skip_sentences = position;
        break;
    }
    if (skip_characters || skip_words || skip_sentences)
        skipping_text = 1;

    end_character_position = end_position;

    espeak_ng_STATUS aStatus = Synthesize(unique_identifier, text, flags);
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
        int error = (aStatus == ENS_SPEECH_STOPPED)
                  ? audio_object_flush(my_audio)
                  : audio_object_drain(my_audio);
        if (error != 0)
            fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
    }
#endif

    return aStatus;
}

espeak_ng_STATUS sync_espeak_Synth_Mark(unsigned int unique_identifier, const void *text,
                                        const char *index_mark, unsigned int end_position,
                                        unsigned int flags, void *user_data)
{
    InitText(flags);

    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    if (index_mark != NULL) {
        strncpy0(skip_marker, index_mark, sizeof(skip_marker));
        skipping_text = 1;
    }

    end_character_position = end_position;

    return Synthesize(unique_identifier, text, flags | espeakSSML);
}

espeak_ng_STATUS sync_espeak_Key(const char *key)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    int letter;
    int ix;

    ix = utf8_in(&letter, key);
    if (key[ix] == 0) // a single character
        return sync_espeak_Char(letter);

    my_unique_identifier = 0;
    my_user_data = NULL;
    return Synthesize(0, key, 0); // speak key as a text string
}

espeak_ng_STATUS sync_espeak_Char(wchar_t character)
{
    // is there a system resource of character names per language?
    char buf[80];
    my_unique_identifier = 0;
    my_user_data = NULL;

    sprintf(buf, "<say-as interpret-as=\"tts:char\">&#%d;</say-as>", character);
    return Synthesize(0, buf, espeakSSML);
}

void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation which are spoken for "some".
    my_unique_identifier = 0;
    my_user_data = NULL;

    option_punctlist[0] = 0;
    if (punctlist != NULL) {
        wcsncpy(option_punctlist, punctlist, N_PUNCTLIST);
        option_punctlist[N_PUNCTLIST-1] = 0;
    }
}

#pragma GCC visibility push(default)

ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback *SynthCallback)
{
    synth_callback = SynthCallback;
#ifdef USE_ASYNC
    event_set_callback(synth_callback);
#endif
}
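/* Callback sketch (illustrative, not part of this file): in synchronous
 * retrieval mode (no ENOUTPUT_MODE_SPEAK_AUDIO bit), Synthesize() above hands
 * each filled buffer to this callback; a NULL wav pointer marks the end of
 * data, and a non-zero return value stops synthesis. f_wav is a hypothetical
 * FILE* opened by the caller.
 *
 *     static int my_callback(short *wav, int numsamples, espeak_EVENT *events)
 *     {
 *         if (wav == NULL)
 *             return 0; // end of data
 *         fwrite(wav, sizeof(short), numsamples, f_wav);
 *         return 0; // 0 = continue synthesis, non-zero = stop
 *     }
 *
 *     espeak_SetSynthCallback(my_callback);
 */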
ESPEAK_API void espeak_SetUriCallback(int (*UriCallback)(int, const char *, const char *))
{
    uri_callback = UriCallback;
}

ESPEAK_API void espeak_SetPhonemeCallback(int (*PhonemeCallback)(const char *))
{
    phoneme_callback = PhonemeCallback;
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_Synthesize(const void *text, size_t size,
                     unsigned int position,
                     espeak_POSITION_TYPE position_type,
                     unsigned int end_position, unsigned int flags,
                     unsigned int *unique_identifier, void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;
    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the text command
    t_espeak_command *c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_text.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_SynthesizeMark(const void *text,
                         size_t size,
                         const char *index_mark,
                         unsigned int end_position,
                         unsigned int flags,
                         unsigned int *unique_identifier,
                         void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;
    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the mark command
    t_espeak_command *c1 = create_espeak_mark(text, size, index_mark, end_position,
                                              flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_mark.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakKeyName(const char *key_name)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Key(key_name);

#ifdef USE_ASYNC
    t_espeak_command *c = create_espeak_key(key_name, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Key(key_name);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakCharacter(wchar_t character)
{
    // is there a system resource of character names per language?
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Char(character);

    t_espeak_command *c = create_espeak_char(character, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Char(character);
#endif
}

ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
{
    // current: 0=default value, 1=current value
    if (current)
        return param_stack[0].parameter[parameter];
    return param_defaults[parameter];
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetParameter(espeak_PARAMETER parameter, int value, int relative)
{
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return SetParameter(parameter, value, relative);

    t_espeak_command *c = create_espeak_parameter(parameter, value, relative);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return SetParameter(parameter, value, relative);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation which are spoken for "some".
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS) {
        sync_espeak_SetPunctuationList(punctlist);
        return ENS_OK;
    }

    t_espeak_command *c = create_espeak_punctuation_list(punctlist);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    sync_espeak_SetPunctuationList(punctlist);
    return ENS_OK;
#endif
}

ESPEAK_API void espeak_SetPhonemeTrace(int phonememode, FILE *stream)
{
    /* phonememode: Controls the output of phoneme symbols for the text
       bits 0-2:
         value=0  No phoneme output (default)
         value=1  Output the translated phoneme symbols for the text
         value=2  as (1), but produces IPA phoneme names rather than ascii
       bit 3:   output a trace of how the translation was done (showing the matching rules and list entries)
       bit 4:   produce pho data for mbrola
       bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
       bits 8-23: separator character, between phoneme names
       stream:  output stream for the phoneme symbols (and trace). If stream=NULL then it uses stderr.
    */
    option_phonemes = phonememode;
    f_trans = stream;
    if (stream == NULL)
        f_trans = stderr;
}

ESPEAK_API const char *espeak_TextToPhonemes(const void **textptr, int textmode, int phonememode)
{
    /* phonememode
       bit 1:   0=eSpeak's ascii phoneme names, 1= International Phonetic Alphabet (as UTF-8 characters).
       bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
       bits 8-23: separator character, between phoneme names
    */
    option_multibyte = textmode & 7;
    *textptr = TranslateClause(translator, NULL, *textptr, NULL, NULL);
    return GetTranslatedPhonemeString(phonememode);
}
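/* Usage sketch (illustrative, not part of this file): setting bit 1 of
 * phonememode (0x02) selects IPA phoneme names. Each call translates one
 * clause and advances *textptr past it; per the published API, *textptr is
 * set to NULL once the end of the text is reached.
 *
 *     const void *next = "Hello world. How are you?";
 *     while (next != NULL) {
 *         const char *phonemes = espeak_TextToPhonemes(&next, espeakCHARS_AUTO, 0x02);
 *         puts(phonemes);
 *     }
 */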
ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    event_clear_all();
#endif

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO)
        audio_object_flush(my_audio);
#endif
    embedded_value[EMBED_T] = 0; // reset echo for pronunciation announcements

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        SetParameter(i, saved_parameters[i], 0);

    return ENS_OK;
}

ESPEAK_API int espeak_IsPlaying(void)
{
#ifdef USE_ASYNC
    return fifo_is_busy();
#else
    return 0;
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
{
    espeak_ng_STATUS berr = err;
#ifdef USE_ASYNC
    while (espeak_IsPlaying())
        usleep(20000);
#endif
    err = ENS_OK;
    return berr;
}

extern void FreePhData(void);
extern void FreeVoiceList(void);

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Terminate(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    fifo_terminate();
    event_terminate();
#endif

    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        audio_object_close(my_audio);
        audio_object_destroy(my_audio);
#endif
        out_samplerate = 0;
    }

    free(event_list);
    event_list = NULL;

    free(outbuf);
    outbuf = NULL;

    FreePhData();
    FreeVoiceList();

    return ENS_OK;
}

ESPEAK_API const char *espeak_Info(const char **ptr)
{
    if (ptr != NULL)
        *ptr = path_home;
    return version_string;
}

#pragma GCC visibility pop