eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
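The speech.c source below implements the core of the public espeak-ng C API: path and output initialization, synchronous and asynchronous synthesis, event dispatch, and teardown. For orientation, here is a minimal sketch of how a caller might drive these functions in synchronous playback mode. It is a hypothetical example, not part of speech.c, and it assumes the espeak-ng development headers are installed; espeakCHARS_AUTO and POS_CHARACTER come from speak_lib.h, everything else is defined in the file below.

/* Hypothetical caller, for orientation only -- not part of speech.c.
 * Assumes libespeak-ng and its headers are installed; espeakCHARS_AUTO
 * comes from speak_lib.h, the rest is implemented in the file below. */
#include <string.h>
#include <espeak-ng/espeak_ng.h>
#include <espeak/speak_lib.h>

int main(void)
{
    const char *text = "Hello from eSpeak NG.";

    espeak_ng_InitializePath(NULL);            // locate the espeak-data directory
    if (espeak_ng_Initialize(NULL) != ENS_OK)  // load phoneme data, set default parameters
        return 1;

    // Synchronous playback directly to the default audio device.
    if (espeak_ng_InitializeOutput(ENOUTPUT_MODE_SYNCHRONOUS | ENOUTPUT_MODE_SPEAK_AUDIO,
                                   0, NULL) != ENS_OK)
        return 1;

    espeak_ng_Synthesize(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                         espeakCHARS_AUTO, NULL, NULL);

    espeak_ng_Synchronize();                   // wait until playback has finished
    return espeak_ng_Terminate() == ENS_OK ? 0 : 1;
}

In buffered output mode (without ENOUTPUT_MODE_SPEAK_AUDIO), the caller would instead register a handler with espeak_SetSynthCallback() and receive the 16-bit samples and event list through that callback.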

speech.c (24 KB)

/*
 * Copyright (C) 2005 to 2013 by Jonathan Duddington
 * email: [email protected]
 * Copyright (C) 2013-2016 Reece H. Dunn
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see: <http://www.gnu.org/licenses/>.
 */

#include "config.h"

#include "errno.h"
#include "stdio.h"
#include "ctype.h"
#include "string.h"
#include "stdlib.h"
#include "wchar.h"
#include "locale.h"
#include <assert.h>
#include <time.h>

#include <espeak-ng/espeak_ng.h>
#include <espeak/speak_lib.h>

#include "speech.h"

#include <sys/stat.h>
#ifdef PLATFORM_WINDOWS
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#include <winreg.h>
#else /* PLATFORM_POSIX */
#include <unistd.h>
#endif

#include "phoneme.h"
#include "synthesize.h"
#include "voice.h"
#include "translate.h"
#include "fifo.h"
#include "event.h"
#include "wave.h"

#ifndef S_ISDIR
#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
#endif
unsigned char *outbuf = NULL;

espeak_EVENT *event_list = NULL;
int event_list_ix = 0;
int n_event_list;
long count_samples;
void *my_audio = NULL;

static const char *option_device = NULL;
static unsigned int my_unique_identifier = 0;
static void *my_user_data = NULL;
static espeak_ng_OUTPUT_MODE my_mode = ENOUTPUT_MODE_SYNCHRONOUS;
static int out_samplerate = 0;
static int voice_samplerate = 22050;
static espeak_ng_STATUS err = ENS_OK;

t_espeak_callback *synth_callback = NULL;
int (*uri_callback)(int, const char *, const char *) = NULL;
int (*phoneme_callback)(const char *) = NULL;

char path_home[N_PATH_HOME]; // this is the espeak-data directory
extern int saved_parameters[N_SPEECH_PARAM]; // parameters saved on synthesis start

void WVoiceChanged(voice_t *wvoice)
{
    // voice change in wavegen
    voice_samplerate = wvoice->samplerate;
}
#ifdef USE_ASYNC

static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
{
    int a_wave_can_be_played = fifo_is_command_enabled();

    switch (my_mode)
    {
    case ENOUTPUT_MODE_SPEAK_AUDIO:
    {
        int event_type = 0;
        if (event)
            event_type = event->type;

        if (event_type == espeakEVENT_SAMPLERATE) {
            voice_samplerate = event->id.number;

            if (out_samplerate != voice_samplerate) {
                if (out_samplerate != 0) {
                    // sound was previously open with a different sample rate
                    wave_close(my_audio);
                    sleep(1);
                }
                out_samplerate = voice_samplerate;
                my_audio = wave_open(voice_samplerate, option_device);
                if (!my_audio) {
                    err = ENS_AUDIO_ERROR;
                    return -1;
                }
                wave_set_callback_is_output_enabled(fifo_is_command_enabled);
                event_init();
            }
        }

        if (outbuf && length && a_wave_can_be_played) {
            wave_write(my_audio, (char *)outbuf, 2*length);
        }

        while (event && a_wave_can_be_played) {
            // TBD: some events are filtered here, but some insight might be gained in
            // synthesise.cpp to avoid creating WORD events with size=0. For example,
            // the sentence "or ALT)." returns three words: "or", "ALT" and "";
            // the last one has size=0.
            if ((event->type == espeakEVENT_WORD) && (event->length == 0))
                break;

            err = event_declare(event);
            if (err != ENS_EVENT_BUFFER_FULL)
                break;

            usleep(10000);
            a_wave_can_be_played = fifo_is_command_enabled();
        }
    }
        break;
    case 0:
        if (synth_callback)
            synth_callback(outbuf, length, event);
        break;
    }

    return a_wave_can_be_played == 0; // 1 = stop synthesis, -1 = error
}

static int create_events(short *outbuf, int length, espeak_EVENT *event_list, uint32_t the_write_pos)
{
    int finished;
    int i = 0;

    // The audio data are written to the output device.
    // The list of events in event_list (index: event_list_ix) is read:
    // each event is declared to the "event" object, which stores them internally.
    // The event object is responsible for calling the external callback
    // as soon as the relevant audio sample is played.

    do { // for each event
        espeak_EVENT *event;
        if (event_list_ix == 0)
            event = NULL;
        else {
            event = event_list + i;
            event->sample += the_write_pos;
        }
        finished = dispatch_audio((short *)outbuf, length, event);
        length = 0; // the wave data are played once
        i++;
    } while ((i < event_list_ix) && !finished);

    return finished;
}

int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
{
    int finished = 0;

    memset(event_list, 0, 2*sizeof(espeak_EVENT));

    event_list[0].type = espeakEVENT_MSG_TERMINATED;
    event_list[0].unique_identifier = unique_identifier;
    event_list[0].user_data = user_data;

    event_list[1].type = espeakEVENT_LIST_TERMINATED;
    event_list[1].unique_identifier = unique_identifier;
    event_list[1].user_data = user_data;

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
        while (1) {
            err = event_declare(event_list);
            if (err != ENS_EVENT_BUFFER_FULL)
                break;
            usleep(10000);
        }
    } else {
        if (synth_callback)
            finished = synth_callback(NULL, 0, event_list);
    }
    return finished;
}

#endif
#pragma GCC visibility push(default)

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_InitializeOutput(espeak_ng_OUTPUT_MODE output_mode, int buffer_length, const char *device)
{
    option_device = device;
    my_mode = output_mode;
    my_audio = NULL;
    option_waveout = 1; // inhibit portaudio callback from wavegen.cpp
    out_samplerate = 0;

    if (output_mode == (ENOUTPUT_MODE_SYNCHRONOUS | ENOUTPUT_MODE_SPEAK_AUDIO)) {
        option_waveout = 0;
        WavegenInitSound();
    }

    // buffer_length is in ms; allocate 2 bytes per sample
    if ((buffer_length == 0) || (output_mode & ENOUTPUT_MODE_SPEAK_AUDIO))
        buffer_length = 200;

    outbuf_size = (buffer_length * samplerate)/500;
    out_start = (unsigned char *)realloc(outbuf, outbuf_size);
    if (out_start == NULL)
        return ENOMEM;
    else
        outbuf = out_start;

    // Allocate space for the event list. Allow 200 events per second,
    // plus a constant to allow for a very small buffer_length.
    n_event_list = (buffer_length*200)/1000 + 20;
    if ((event_list = (espeak_EVENT *)realloc(event_list, sizeof(espeak_EVENT) * n_event_list)) == NULL)
        return ENOMEM;

    return ENS_OK;
}

int GetFileLength(const char *filename)
{
    struct stat statbuf;

    if (stat(filename, &statbuf) != 0)
        return 0;

    if (S_ISDIR(statbuf.st_mode))
        return -2; // a directory

    return statbuf.st_size;
}

#pragma GCC visibility pop
char *Alloc(int size)
{
    char *p;
    if ((p = (char *)malloc(size)) == NULL)
        fprintf(stderr, "Can't allocate memory\n"); // I was told that size+1 fixes a crash on 64-bit systems
    return p;
}

void Free(void *ptr)
{
    if (ptr != NULL)
        free(ptr);
}

#pragma GCC visibility push(default)

ESPEAK_NG_API void espeak_ng_InitializePath(const char *path)
{
    if (path != NULL) {
        sprintf(path_home, "%s/espeak-data", path);
        return;
    }

#ifdef PLATFORM_WINDOWS
    HKEY RegKey;
    unsigned long size;
    unsigned long var_type;
    char *env;
    unsigned char buf[sizeof(path_home)-13];

    if ((env = getenv("ESPEAK_DATA_PATH")) != NULL) {
        sprintf(path_home, "%s/espeak-data", env);
        if (GetFileLength(path_home) == -2)
            return; // an espeak-data directory exists
    }

    buf[0] = 0;
    RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\Microsoft\\Speech\\Voices\\Tokens\\eSpeak", 0, KEY_READ, &RegKey);
    size = sizeof(buf);
    var_type = REG_SZ;
    RegQueryValueExA(RegKey, "path", 0, &var_type, buf, &size);

    sprintf(path_home, "%s\\espeak-data", buf);
#elif defined(PLATFORM_DOS)
    strcpy(path_home, PATH_ESPEAK_DATA);
#else
    char *env;

    // check for environment variable
    if ((env = getenv("ESPEAK_DATA_PATH")) != NULL) {
        snprintf(path_home, sizeof(path_home), "%s/espeak-data", env);
        if (GetFileLength(path_home) == -2)
            return; // an espeak-data directory exists
    }

    snprintf(path_home, sizeof(path_home), "%s/espeak-data", getenv("HOME"));
    if (access(path_home, R_OK) != 0)
        strcpy(path_home, PATH_ESPEAK_DATA);
#endif
}
ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Initialize(espeak_ng_ERROR_CONTEXT *context)
{
    int param;
    int srate = 22050; // default sample rate 22050 Hz

    // It seems that the wctype functions don't work until the locale has been set
    // to something other than the default "C". Then, not only Latin1 but also the
    // other characters give the correct results with iswalpha() etc.
    if (setlocale(LC_CTYPE, "C.UTF-8") == NULL) {
        if (setlocale(LC_CTYPE, "UTF-8") == NULL) {
            if (setlocale(LC_CTYPE, "en_US.UTF-8") == NULL)
                setlocale(LC_CTYPE, "");
        }
    }

    espeak_ng_STATUS result = LoadPhData(&srate, context);
    if (result != ENS_OK)
        return result;

    WavegenInit(srate, 0);
    LoadConfig();

    memset(&current_voice_selected, 0, sizeof(current_voice_selected));
    SetVoiceStack(NULL, "");
    SynthesizeInit();
    InitNamedata();

    VoiceReset(0);

    for (param = 0; param < N_SPEECH_PARAM; param++)
        param_stack[0].parameter[param] = param_defaults[param];

    SetParameter(espeakRATE, 175, 0);
    SetParameter(espeakVOLUME, 100, 0);
    SetParameter(espeakCAPITALS, option_capitals, 0);
    SetParameter(espeakPUNCTUATION, option_punctuation, 0);
    SetParameter(espeakWORDGAP, 0, 0);

#ifdef USE_ASYNC
    fifo_init();
#endif

    option_phonemes = 0;
    option_phoneme_events = 0;

    return ENS_OK;
}

ESPEAK_NG_API int espeak_ng_GetSampleRate(void)
{
    return samplerate;
}

#pragma GCC visibility pop
static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
    // Fill the buffer with output sound
    int length;
    int finished = 0;
    int count_buffers = 0;
#ifdef USE_ASYNC
    uint32_t a_write_pos = 0;
#endif

    if ((outbuf == NULL) || (event_list == NULL))
        return ENS_NOT_INITIALIZED;

    option_multibyte = flags & 7;
    option_ssml = flags & espeakSSML;
    option_phoneme_input = flags & espeakPHONEMES;
    option_endpause = flags & espeakENDPAUSE;

    count_samples = 0;

#ifdef USE_ASYNC
    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO)
        a_write_pos = wave_get_write_position(my_audio);
#endif

    if (translator == NULL)
        espeak_SetVoiceByName("default");

    SpeakNextClause(NULL, text, 0);

    if (my_mode == (ENOUTPUT_MODE_SYNCHRONOUS | ENOUTPUT_MODE_SPEAK_AUDIO)) {
        for (;;) {
#ifdef PLATFORM_WINDOWS
            Sleep(300); // 0.3s
#else
#ifdef USE_NANOSLEEP
            struct timespec period;
            struct timespec remaining;
            period.tv_sec = 0;
            period.tv_nsec = 300000000; // 0.3 sec
            nanosleep(&period, &remaining);
#else
            sleep(1);
#endif
#endif
            if (SynthOnTimer() != 0)
                break;
        }
        return ENS_OK;
    }

    for (;;) {
        out_ptr = outbuf;
        out_end = &outbuf[outbuf_size];
        event_list_ix = 0;
        WavegenFill();

        length = (out_ptr - outbuf)/2;
        count_samples += length;
        event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
        event_list[event_list_ix].unique_identifier = unique_identifier;
        event_list[event_list_ix].user_data = my_user_data;

        count_buffers++;
        if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
#ifdef USE_ASYNC
            finished = create_events((short *)outbuf, length, event_list, a_write_pos);
            if (finished < 0)
                return ENS_AUDIO_ERROR;
#endif
        } else
            finished = synth_callback((short *)outbuf, length, event_list);
        if (finished) {
            SpeakNextClause(NULL, 0, 2); // stop
            break;
        }

        if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
            if (WcmdqUsed() == 0) {
                // don't process the next clause until the previous clause has finished generating speech.
                // This ensures that <audio> tag (which causes end-of-clause) is at a sound buffer boundary
                event_list[0].type = espeakEVENT_LIST_TERMINATED;
                event_list[0].unique_identifier = my_unique_identifier;
                event_list[0].user_data = my_user_data;

                if (SpeakNextClause(NULL, NULL, 1) == 0) {
#ifdef USE_ASYNC
                    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
                        if (dispatch_audio(NULL, 0, NULL) < 0)
                            return ENS_AUDIO_ERROR;
                    } else
                        synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
#else
                    synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
#endif
                    break;
                }
            }
        }
    }
    return ENS_OK;
}
void MarkerEvent(int type, unsigned int char_position, int value, int value2, unsigned char *out_ptr)
{
    // type: 1=word, 2=sentence, 3=named mark, 4=play audio, 5=end, 7=phoneme
    espeak_EVENT *ep;
    double time;

    if ((event_list == NULL) || (event_list_ix >= (n_event_list-2)))
        return;

    ep = &event_list[event_list_ix++];
    ep->type = (espeak_EVENT_TYPE)type;
    ep->unique_identifier = my_unique_identifier;
    ep->user_data = my_user_data;
    ep->text_position = char_position & 0xffffff;
    ep->length = char_position >> 24;

    time = ((double)(count_samples + mbrola_delay + (out_ptr - out_start)/2)*1000.0)/samplerate;
    ep->audio_position = (int)time;
    ep->sample = (count_samples + mbrola_delay + (out_ptr - out_start)/2);

    if ((type == espeakEVENT_MARK) || (type == espeakEVENT_PLAY))
        ep->id.name = &namedata[value];
    else if (type == espeakEVENT_PHONEME) {
        int *p;
        p = (int *)(ep->id.string);
        p[0] = value;
        p[1] = value2;
    } else
        ep->id.number = value;
}
espeak_ng_STATUS sync_espeak_Synth(unsigned int unique_identifier, const void *text,
                                   unsigned int position, espeak_POSITION_TYPE position_type,
                                   unsigned int end_position, unsigned int flags, void *user_data)
{
    InitText(flags);
    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        saved_parameters[i] = param_stack[0].parameter[i];

    switch (position_type)
    {
    case POS_CHARACTER:
        skip_characters = position;
        break;
    case POS_WORD:
        skip_words = position;
        break;
    case POS_SENTENCE:
        skip_sentences = position;
        break;
    }
    if (skip_characters || skip_words || skip_sentences)
        skipping_text = 1;

    end_character_position = end_position;

    espeak_ng_STATUS aStatus = Synthesize(unique_identifier, text, flags);
#ifdef USE_ASYNC
    wave_flush(my_audio);
#endif

    return aStatus;
}

espeak_ng_STATUS sync_espeak_Synth_Mark(unsigned int unique_identifier, const void *text,
                                        const char *index_mark, unsigned int end_position,
                                        unsigned int flags, void *user_data)
{
    InitText(flags);

    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    if (index_mark != NULL) {
        strncpy0(skip_marker, index_mark, sizeof(skip_marker));
        skipping_text = 1;
    }

    end_character_position = end_position;

    return Synthesize(unique_identifier, text, flags | espeakSSML);
}

espeak_ng_STATUS sync_espeak_Key(const char *key)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    int letter;
    int ix;

    ix = utf8_in(&letter, key);
    if (key[ix] == 0) // a single character
        return sync_espeak_Char(letter);

    my_unique_identifier = 0;
    my_user_data = NULL;
    return Synthesize(0, key, 0); // speak key as a text string
}

espeak_ng_STATUS sync_espeak_Char(wchar_t character)
{
    // is there a system resource of character names per language?
    char buf[80];
    my_unique_identifier = 0;
    my_user_data = NULL;

    sprintf(buf, "<say-as interpret-as=\"tts:char\">&#%d;</say-as>", character);
    return Synthesize(0, buf, espeakSSML);
}

void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation which are spoken for "some".
    my_unique_identifier = 0;
    my_user_data = NULL;

    option_punctlist[0] = 0;
    if (punctlist != NULL) {
        wcsncpy(option_punctlist, punctlist, N_PUNCTLIST);
        option_punctlist[N_PUNCTLIST-1] = 0;
    }
}
#pragma GCC visibility push(default)

ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback *SynthCallback)
{
    synth_callback = SynthCallback;
#ifdef USE_ASYNC
    event_set_callback(synth_callback);
#endif
}

ESPEAK_API void espeak_SetUriCallback(int (*UriCallback)(int, const char *, const char *))
{
    uri_callback = UriCallback;
}

ESPEAK_API void espeak_SetPhonemeCallback(int (*PhonemeCallback)(const char *))
{
    phoneme_callback = PhonemeCallback;
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_Synthesize(const void *text, size_t size,
                     unsigned int position,
                     espeak_POSITION_TYPE position_type,
                     unsigned int end_position, unsigned int flags,
                     unsigned int *unique_identifier, void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the text command
    t_espeak_command *c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);

    // Retrieve the unique identifier
    *unique_identifier = c1->u.my_text.unique_identifier;

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);
#endif
}
ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_SynthesizeMark(const void *text,
                         size_t size,
                         const char *index_mark,
                         unsigned int end_position,
                         unsigned int flags,
                         unsigned int *unique_identifier,
                         void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the mark command
    t_espeak_command *c1 = create_espeak_mark(text, size, index_mark, end_position,
                                              flags, user_data);

    // Retrieve the unique identifier
    *unique_identifier = c1->u.my_mark.unique_identifier;

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakKeyName(const char *key_name)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Key(key_name);

#ifdef USE_ASYNC
    t_espeak_command *c = create_espeak_key(key_name, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Key(key_name);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakCharacter(wchar_t character)
{
    // is there a system resource of character names per language?
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Char(character);

    t_espeak_command *c = create_espeak_char(character, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Char(character);
#endif
}
ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
{
    // current: 0=default value, 1=current value
    if (current)
        return param_stack[0].parameter[parameter];
    return param_defaults[parameter];
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetParameter(espeak_PARAMETER parameter, int value, int relative)
{
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return SetParameter(parameter, value, relative);

    t_espeak_command *c = create_espeak_parameter(parameter, value, relative);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return SetParameter(parameter, value, relative);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation which are spoken for "some".
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS) {
        sync_espeak_SetPunctuationList(punctlist);
        return ENS_OK;
    }

    t_espeak_command *c = create_espeak_punctuation_list(punctlist);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    sync_espeak_SetPunctuationList(punctlist);
    return ENS_OK;
#endif
}
ESPEAK_API void espeak_SetPhonemeTrace(int phonememode, FILE *stream)
{
    /* phonememode: Controls the output of phoneme symbols for the text
         bits 0-2:
           value=0  No phoneme output (default)
           value=1  Output the translated phoneme symbols for the text
           value=2  as (1), but produces IPA phoneme names rather than ascii
         bit 3:   output a trace of how the translation was done (showing the matching rules and list entries)
         bit 4:   produce pho data for mbrola
         bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
         bits 8-23:  separator character, between phoneme names

       stream: output stream for the phoneme symbols (and trace). If stream=NULL then it uses stdout.
    */
    option_phonemes = phonememode;
    f_trans = stream;
    if (stream == NULL)
        f_trans = stderr;
}

ESPEAK_API const char *espeak_TextToPhonemes(const void **textptr, int textmode, int phonememode)
{
    /* phoneme_mode
         bit 1:   0=eSpeak's ascii phoneme names, 1=International Phonetic Alphabet (as UTF-8 characters).
         bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
         bits 8-23:  separator character, between phoneme names
    */
    option_multibyte = textmode & 7;
    *textptr = TranslateClause(translator, NULL, *textptr, NULL, NULL);
    return GetTranslatedPhonemeString(phonememode);
}
ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    event_clear_all();

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO)
        wave_close(my_audio);
#endif
    embedded_value[EMBED_T] = 0; // reset echo for pronunciation announcements

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        SetParameter(i, saved_parameters[i], 0);

    return ENS_OK;
}

ESPEAK_API int espeak_IsPlaying(void)
{
#ifdef USE_ASYNC
    if ((my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) && wave_is_busy(my_audio))
        return 1;
    return fifo_is_busy();
#else
    return 0;
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
{
    espeak_ng_STATUS berr = err;
#ifdef USE_ASYNC
    while (espeak_IsPlaying())
        usleep(20000);
#endif
    err = ENS_OK;
    return berr;
}

extern void FreePhData(void);
extern void FreeVoiceList(void);

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Terminate(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    fifo_terminate();
    event_terminate();

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
        wave_close(my_audio);
        wave_terminate();
        out_samplerate = 0;
    }
#endif
    Free(event_list);
    event_list = NULL;

    Free(outbuf);
    outbuf = NULL;

    FreePhData();
    FreeVoiceList();

    return ENS_OK;
}

ESPEAK_API const char *espeak_Info(const char **ptr)
{
    if (ptr != NULL)
        *ptr = path_home;
    return version_string;
}

#pragma GCC visibility pop