eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.

speech.c
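
Below is the full source. For orientation first, here is a minimal sketch of a synchronous client of the API implemented in this file, with output retrieved through the synthesis callback (function names in the sketch are illustrative and error handling is trimmed):

#include <espeak-ng/espeak_ng.h>
#include <espeak-ng/speak_lib.h>
#include <stdio.h>
#include <string.h>

// Receives synthesized 16-bit samples; returning non-zero stops synthesis.
static int on_audio(short *wav, int numsamples, espeak_EVENT *events)
{
    (void)events;
    if (wav != NULL)
        fwrite(wav, sizeof(short), numsamples, stdout); // raw S16LE samples
    return 0; // 0 = continue synthesis
}

int main(void)
{
    espeak_ng_InitializePath(NULL); // use the default espeak-ng-data search order
    if (espeak_ng_Initialize(NULL) != ENS_OK)
        return 1;
    // Synchronous mode without SPEAK_AUDIO: buffers go to the callback.
    espeak_ng_InitializeOutput(ENOUTPUT_MODE_SYNCHRONOUS, 0, NULL);
    espeak_SetSynthCallback(on_audio);

    const char *text = "Hello from eSpeak NG.";
    espeak_ng_Synthesize(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                         espeakCHARS_UTF8, NULL, NULL);
    espeak_ng_Synchronize();
    espeak_ng_Terminate();
    return 0;
}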

/*
 * Copyright (C) 2005 to 2013 by Jonathan Duddington
 * email: [email protected]
 * Copyright (C) 2013-2017 Reece H. Dunn
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see: <http://www.gnu.org/licenses/>.
 */

#include "config.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <locale.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#include <wchar.h>

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
#include <pcaudiolib/audio.h>
#endif

#if defined(_WIN32) || defined(_WIN64)
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#include <winreg.h>
#endif

#include <espeak-ng/espeak_ng.h>
#include <espeak-ng/speak_lib.h>
#include <espeak-ng/encoding.h>

#include "speech.h"
#include "phoneme.h"
#include "synthesize.h"
#include "voice.h"
#include "translate.h"
#include "espeak_command.h"
#include "fifo.h"
#include "event.h"

unsigned char *outbuf = NULL;
int outbuf_size = 0;

espeak_EVENT *event_list = NULL;
int event_list_ix = 0;
int n_event_list;
long count_samples;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
struct audio_object *my_audio = NULL;
#endif

static const char *option_device = NULL;
static unsigned int my_unique_identifier = 0;
static void *my_user_data = NULL;
static espeak_ng_OUTPUT_MODE my_mode = ENOUTPUT_MODE_SYNCHRONOUS;
static int out_samplerate = 0;
static int voice_samplerate = 22050;
static espeak_ng_STATUS err = ENS_OK;

t_espeak_callback *synth_callback = NULL;
int (*uri_callback)(int, const char *, const char *) = NULL;
int (*phoneme_callback)(const char *) = NULL;

char path_home[N_PATH_HOME]; // this is the espeak-ng-data directory

extern int saved_parameters[N_SPEECH_PARAM]; // parameters saved on synthesis start

void cancel_audio(void)
{
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
        audio_object_flush(my_audio);
    }
#endif
}

static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
{
    int a_wave_can_be_played = 1;
#ifdef USE_ASYNC
    if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
        a_wave_can_be_played = fifo_is_command_enabled();
#endif

    switch ((int)my_mode)
    {
    case ENOUTPUT_MODE_SPEAK_AUDIO:
    case ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS:
    {
        int event_type = 0;
        if (event)
            event_type = event->type;

        if (event_type == espeakEVENT_SAMPLERATE) {
            voice_samplerate = event->id.number;

            if (out_samplerate != voice_samplerate) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                if (out_samplerate != 0) {
                    // sound was previously open with a different sample rate
                    audio_object_close(my_audio);
#ifdef HAVE_SLEEP
                    sleep(1);
#endif
                }
#endif
                out_samplerate = voice_samplerate;
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
                if (error != 0) {
                    fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
                    err = ENS_AUDIO_ERROR;
                    return -1;
                }
#endif
#ifdef USE_ASYNC
                if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
                    event_init();
#endif
            }
        }

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        if (outbuf && length && a_wave_can_be_played) {
            int error = audio_object_write(my_audio, (char *)outbuf, 2*length);
            if (error != 0)
                fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
        }
#endif

#ifdef USE_ASYNC
        while (event && a_wave_can_be_played) {
            // TBD: some events are filtered out here, but synthesize.c could
            // be changed to avoid creating WORD events with size=0 in the
            // first place. For example, the sentence "or ALT)." produces
            // three words: "or", "ALT" and ""; the last one has size=0.
            if ((event->type == espeakEVENT_WORD) && (event->length == 0))
                break;

            if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0) {
                err = event_declare(event);
                if (err != ENS_EVENT_BUFFER_FULL)
                    break;
                usleep(10000);
                a_wave_can_be_played = fifo_is_command_enabled();
            } else
                break;
        }
#endif
    }
        break;
    case 0:
        if (synth_callback)
            synth_callback(outbuf, length, event);
        break;
    }
    return a_wave_can_be_played == 0; // 1 = stop synthesis; -1 (error) is returned earlier on audio failure
}

static int create_events(short *outbuf, int length, espeak_EVENT *event_list)
{
    int finished;
    int i = 0;

    // The audio data are written to the output device.
    // The list of events in event_list (index: event_list_ix) is read:
    // each event is declared to the "event" object, which stores them internally.
    // The event object is responsible for calling the external callback
    // as soon as the relevant audio sample is played.

    do { // for each event
        espeak_EVENT *event;
        if (event_list_ix == 0)
            event = NULL;
        else
            event = event_list + i;
        finished = dispatch_audio((short *)outbuf, length, event);
        length = 0; // the wave data are played only once
        i++;
    } while ((i < event_list_ix) && !finished);
    return finished;
}

#ifdef USE_ASYNC
int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
{
    int finished = 0;

    memset(event_list, 0, 2*sizeof(espeak_EVENT));
    event_list[0].type = espeakEVENT_MSG_TERMINATED;
    event_list[0].unique_identifier = unique_identifier;
    event_list[0].user_data = user_data;
    event_list[1].type = espeakEVENT_LIST_TERMINATED;
    event_list[1].unique_identifier = unique_identifier;
    event_list[1].user_data = user_data;

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
        while (1) {
            err = event_declare(event_list);
            if (err != ENS_EVENT_BUFFER_FULL)
                break;
            usleep(10000);
        }
    } else if (synth_callback)
        finished = synth_callback(NULL, 0, event_list);
    return finished;
}
#endif

static int check_data_path(const char *path, int allow_directory)
{
    if (!path) return 0;

    snprintf(path_home, sizeof(path_home), "%s/espeak-ng-data", path);
    if (GetFileLength(path_home) == -EISDIR)
        return 1;

    if (!allow_directory)
        return 0;

    snprintf(path_home, sizeof(path_home), "%s", path);
    return GetFileLength(path_home) == -EISDIR;
}

#pragma GCC visibility push(default)

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_InitializeOutput(espeak_ng_OUTPUT_MODE output_mode, int buffer_length, const char *device)
{
    option_device = device;
    my_mode = output_mode;
    out_samplerate = 0;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if (my_audio == NULL)
        my_audio = create_audio_device_object(device, "eSpeak", "Text-to-Speech");
#endif
    // buffer_length is in ms; allocate 2 bytes per sample
    if (buffer_length == 0)
        buffer_length = 60;
    outbuf_size = (buffer_length * samplerate)/500;
    out_start = (unsigned char *)realloc(outbuf, outbuf_size);
    if (out_start == NULL)
        return ENOMEM;
    else
        outbuf = out_start;

    // allocate space for the event list; allow 200 events per second,
    // plus a constant to allow for a very small buffer_length
    n_event_list = (buffer_length*200)/1000 + 20;
    espeak_EVENT *new_event_list = (espeak_EVENT *)realloc(event_list, sizeof(espeak_EVENT) * n_event_list);
    if (new_event_list == NULL)
        return ENOMEM;
    event_list = new_event_list;

    return ENS_OK;
}
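
// Worked example of the sizing above: with the default buffer_length = 60 ms
// and samplerate = 22050 Hz, outbuf_size = (60 * 22050) / 500 = 2646 bytes,
// i.e. 1323 16-bit samples (60 ms of audio), and
// n_event_list = (60 * 200) / 1000 + 20 = 32 event slots.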

int GetFileLength(const char *filename)
{
    struct stat statbuf;

    if (stat(filename, &statbuf) != 0)
        return -errno;

    if (S_ISDIR(statbuf.st_mode))
        return -EISDIR;

    return statbuf.st_size;
}

ESPEAK_NG_API void espeak_ng_InitializePath(const char *path)
{
    if (check_data_path(path, 1))
        return;

#ifdef PLATFORM_WINDOWS
    HKEY RegKey;
    unsigned long size;
    unsigned long var_type;
    unsigned char buf[sizeof(path_home)-13];

    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    buf[0] = 0;
    RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\eSpeak NG", 0, KEY_READ, &RegKey);
    if (RegKey == NULL)
        RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\WOW6432Node\\eSpeak NG", 0, KEY_READ, &RegKey);
    size = sizeof(buf);
    var_type = REG_SZ;
    RegQueryValueExA(RegKey, "Path", 0, &var_type, buf, &size);

    if (check_data_path(buf, 1))
        return;
#elif !defined(PLATFORM_DOS)
    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    if (check_data_path(getenv("HOME"), 0))
        return;
#endif

    strcpy(path_home, PATH_ESPEAK_DATA);
}
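
// To summarise the search order above: the caller-supplied path is tried
// first, then the ESPEAK_DATA_PATH environment variable, then the registry
// "Path" value on Windows or $HOME elsewhere, and finally the compiled-in
// default PATH_ESPEAK_DATA. A candidate matches if it contains (or, where
// allow_directory permits, itself is) an espeak-ng-data directory.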

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Initialize(espeak_ng_ERROR_CONTEXT *context)
{
    int param;
    int srate = 22050; // default sample rate 22050 Hz

    // It seems that the wctype functions don't work until the locale has been set
    // to something other than the default "C". Then, not only Latin1 but also the
    // other characters give the correct results with iswalpha() etc.
    if (setlocale(LC_CTYPE, "C.UTF-8") == NULL) {
        if (setlocale(LC_CTYPE, "UTF-8") == NULL) {
            if (setlocale(LC_CTYPE, "en_US.UTF-8") == NULL)
                setlocale(LC_CTYPE, "");
        }
    }

    espeak_ng_STATUS result = LoadPhData(&srate, context);
    if (result != ENS_OK)
        return result;

    WavegenInit(srate, 0);
    LoadConfig();

    memset(&current_voice_selected, 0, sizeof(current_voice_selected));
    SetVoiceStack(NULL, "");
    SynthesizeInit();
    InitNamedata();

    VoiceReset(0);

    for (param = 0; param < N_SPEECH_PARAM; param++)
        param_stack[0].parameter[param] = saved_parameters[param] = param_defaults[param];

    SetParameter(espeakRATE, 175, 0);
    SetParameter(espeakVOLUME, 100, 0);
    SetParameter(espeakCAPITALS, option_capitals, 0);
    SetParameter(espeakPUNCTUATION, option_punctuation, 0);
    SetParameter(espeakWORDGAP, 0, 0);

#ifdef USE_ASYNC
    fifo_init();
#endif

    option_phonemes = 0;
    option_phoneme_events = 0;

    return ENS_OK;
}

ESPEAK_NG_API int espeak_ng_GetSampleRate(void)
{
    return samplerate;
}

#pragma GCC visibility pop

static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
    // Fill the buffer with output sound
    int length;
    int finished = 0;
    int count_buffers = 0;

    if ((outbuf == NULL) || (event_list == NULL))
        return ENS_NOT_INITIALIZED;

    option_ssml = flags & espeakSSML;
    option_phoneme_input = flags & espeakPHONEMES;
    option_endpause = flags & espeakENDPAUSE;
    count_samples = 0;

    if (translator == NULL)
        espeak_SetVoiceByName("en");

    if (p_decoder == NULL)
        p_decoder = create_text_decoder();

    espeak_ng_STATUS status;
    status = text_decoder_decode_string_multibyte(p_decoder, text, translator->encoding, flags);
    if (status != ENS_OK)
        return status;

    SpeakNextClause(0);

    for (;;) {
        out_ptr = outbuf;
        out_end = &outbuf[outbuf_size];
        event_list_ix = 0;
        WavegenFill();

        length = (out_ptr - outbuf)/2;
        count_samples += length;
        event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
        event_list[event_list_ix].unique_identifier = unique_identifier;
        event_list[event_list_ix].user_data = my_user_data;

        count_buffers++;
        if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
            finished = create_events((short *)outbuf, length, event_list);
            if (finished < 0)
                return ENS_AUDIO_ERROR;
        } else if (synth_callback)
            finished = synth_callback((short *)outbuf, length, event_list);
        if (finished) {
            SpeakNextClause(2); // stop
            return ENS_SPEECH_STOPPED;
        }

        if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
            if (WcmdqUsed() == 0) {
                // Don't process the next clause until the previous clause has finished generating speech.
                // This ensures that the <audio> tag (which causes an end-of-clause) falls on a sound buffer boundary.
                event_list[0].type = espeakEVENT_LIST_TERMINATED;
                event_list[0].unique_identifier = my_unique_identifier;
                event_list[0].user_data = my_user_data;

                if (SpeakNextClause(1) == 0) {
                    finished = 0;
                    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
                        if (dispatch_audio(NULL, 0, NULL) < 0)
                            return ENS_AUDIO_ERROR;
                    } else if (synth_callback)
                        finished = synth_callback(NULL, 0, event_list); // a NULL buffer pointer indicates end of data
                    if (finished) {
                        SpeakNextClause(2); // stop
                        return ENS_SPEECH_STOPPED;
                    }
                    return ENS_OK;
                }
            }
        }
    }
}

void MarkerEvent(int type, unsigned int char_position, int value, int value2, unsigned char *out_ptr)
{
    // type: 1=word, 2=sentence, 3=named mark, 4=play audio, 5=end, 7=phoneme
    espeak_EVENT *ep;
    double time;

    if ((event_list == NULL) || (event_list_ix >= (n_event_list-2)))
        return;

    ep = &event_list[event_list_ix++];
    ep->type = (espeak_EVENT_TYPE)type;
    ep->unique_identifier = my_unique_identifier;
    ep->user_data = my_user_data;
    ep->text_position = char_position & 0xffffff;
    ep->length = char_position >> 24;

    time = ((double)(count_samples + mbrola_delay + (out_ptr - out_start)/2)*1000.0)/samplerate;
    ep->audio_position = (int)time;
    ep->sample = (count_samples + mbrola_delay + (out_ptr - out_start)/2);

    if ((type == espeakEVENT_MARK) || (type == espeakEVENT_PLAY))
        ep->id.name = &namedata[value];
    else if (type == espeakEVENT_PHONEME) {
        int *p;
        p = (int *)(ep->id.string);
        p[0] = value;
        p[1] = value2;
    } else
        ep->id.number = value;
}
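
// Note on the packing above: char_position carries two values in one word.
// The low 24 bits are the character position in the text and the top 8 bits
// are the item's length. For example, char_position = (2 << 24) | 16 describes
// a 2-character item starting at text position 16.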

espeak_ng_STATUS sync_espeak_Synth(unsigned int unique_identifier, const void *text,
                                   unsigned int position, espeak_POSITION_TYPE position_type,
                                   unsigned int end_position, unsigned int flags, void *user_data)
{
    InitText(flags);
    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        saved_parameters[i] = param_stack[0].parameter[i];

    switch (position_type)
    {
    case POS_CHARACTER:
        skip_characters = position;
        break;
    case POS_WORD:
        skip_words = position;
        break;
    case POS_SENTENCE:
        skip_sentences = position;
        break;
    }
    if (skip_characters || skip_words || skip_sentences)
        skipping_text = 1;

    end_character_position = end_position;

    espeak_ng_STATUS aStatus = Synthesize(unique_identifier, text, flags);
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
        int error = (aStatus == ENS_SPEECH_STOPPED)
                  ? audio_object_flush(my_audio)
                  : audio_object_drain(my_audio);
        if (error != 0)
            fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
    }
#endif

    return aStatus;
}

espeak_ng_STATUS sync_espeak_Synth_Mark(unsigned int unique_identifier, const void *text,
                                        const char *index_mark, unsigned int end_position,
                                        unsigned int flags, void *user_data)
{
    InitText(flags);

    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    if (index_mark != NULL) {
        strncpy0(skip_marker, index_mark, sizeof(skip_marker));
        skipping_text = 1;
    }

    end_character_position = end_position;

    return Synthesize(unique_identifier, text, flags | espeakSSML);
}

espeak_ng_STATUS sync_espeak_Key(const char *key)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    int letter;
    int ix;

    ix = utf8_in(&letter, key);
    if (key[ix] == 0) // a single character
        return sync_espeak_Char(letter);

    my_unique_identifier = 0;
    my_user_data = NULL;
    return Synthesize(0, key, 0); // speak the key as a text string
}

espeak_ng_STATUS sync_espeak_Char(wchar_t character)
{
    // is there a system resource of character names per language?
    char buf[80];
    my_unique_identifier = 0;
    my_user_data = NULL;

    sprintf(buf, "<say-as interpret-as=\"tts:char\">&#%d;</say-as>", character);
    return Synthesize(0, buf, espeakSSML);
}

void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation characters which are spoken for "some".
    my_unique_identifier = 0;
    my_user_data = NULL;

    option_punctlist[0] = 0;
    if (punctlist != NULL) {
        wcsncpy(option_punctlist, punctlist, N_PUNCTLIST);
        option_punctlist[N_PUNCTLIST-1] = 0;
    }
}

#pragma GCC visibility push(default)

ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback *SynthCallback)
{
    synth_callback = SynthCallback;
#ifdef USE_ASYNC
    event_set_callback(synth_callback);
#endif
}

ESPEAK_API void espeak_SetUriCallback(int (*UriCallback)(int, const char *, const char *))
{
    uri_callback = UriCallback;
}

ESPEAK_API void espeak_SetPhonemeCallback(int (*PhonemeCallback)(const char *))
{
    phoneme_callback = PhonemeCallback;
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_Synthesize(const void *text, size_t size,
                     unsigned int position,
                     espeak_POSITION_TYPE position_type,
                     unsigned int end_position, unsigned int flags,
                     unsigned int *unique_identifier, void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the text command
    t_espeak_command *c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_text.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_SynthesizeMark(const void *text,
                         size_t size,
                         const char *index_mark,
                         unsigned int end_position,
                         unsigned int flags,
                         unsigned int *unique_identifier,
                         void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the mark command
    t_espeak_command *c1 = create_espeak_mark(text, size, index_mark, end_position,
                                              flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_mark.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakKeyName(const char *key_name)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Key(key_name);

#ifdef USE_ASYNC
    t_espeak_command *c = create_espeak_key(key_name, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Key(key_name);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakCharacter(wchar_t character)
{
    // is there a system resource of character names per language?
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Char(character);

    t_espeak_command *c = create_espeak_char(character, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Char(character);
#endif
}

ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
{
    // current: 0=default value, 1=current value
    if (current)
        return param_stack[0].parameter[parameter];
    return param_defaults[parameter];
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetParameter(espeak_PARAMETER parameter, int value, int relative)
{
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return SetParameter(parameter, value, relative);

    t_espeak_command *c = create_espeak_parameter(parameter, value, relative);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return SetParameter(parameter, value, relative);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation characters which are spoken for "some".
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS) {
        sync_espeak_SetPunctuationList(punctlist);
        return ENS_OK;
    }

    t_espeak_command *c = create_espeak_punctuation_list(punctlist);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    sync_espeak_SetPunctuationList(punctlist);
    return ENS_OK;
#endif
}

ESPEAK_API void espeak_SetPhonemeTrace(int phonememode, FILE *stream)
{
    /* phonememode: controls the output of phoneme symbols for the text
          bits 0-2:
             value=0  no phoneme output (default)
             value=1  output the translated phoneme symbols for the text
             value=2  as (1), but produces IPA phoneme names rather than ASCII ones
          bit 3:   output a trace of how the translation was done (showing the matching rules and list entries)
          bit 4:   produce pho data for mbrola
          bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
          bits 8-23:  separator character, between phoneme names

       stream: output stream for the phoneme symbols (and trace);
               if stream=NULL then stderr is used (see below)
    */
    option_phonemes = phonememode;
    f_trans = stream;
    if (stream == NULL)
        f_trans = stderr;
}
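
// An illustrative sketch of the bit packing documented above (the values are
// examples, not project-mandated constants): requesting IPA phoneme names
// joined by a tie character could look like
//
//     int phonememode = 2 | (1 << 7) | (0x361 << 8); // IPA output, tie enabled, U+0361 as the tie
//     espeak_SetPhonemeTrace(phonememode, stdout);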

ESPEAK_API const char *espeak_TextToPhonemes(const void **textptr, int textmode, int phonememode)
{
    /* phonememode
          bit 1:   0 = eSpeak's ASCII phoneme names, 1 = International Phonetic Alphabet (as UTF-8 characters)
          bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
          bits 8-23:  separator character, between phoneme names
    */
    if (p_decoder == NULL)
        p_decoder = create_text_decoder();

    if (text_decoder_decode_string_multibyte(p_decoder, *textptr, translator->encoding, textmode) != ENS_OK)
        return NULL;

    TranslateClause(translator, NULL, NULL);
    *textptr = text_decoder_get_buffer(p_decoder);

    return GetTranslatedPhonemeString(phonememode);
}
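
// A usage sketch (illustrative, with termination handled defensively): the
// function translates one clause per call and advances *textptr past it, so a
// caller typically loops until the input is consumed:
//
//     const void *p = "Hello world. How are you?";
//     const char *phonemes;
//     while ((phonemes = espeak_TextToPhonemes(&p, espeakCHARS_UTF8, 0x02 /* IPA, bit 1 above */)) != NULL) {
//         printf("%s\n", phonemes);
//         if (p == NULL || *(const char *)p == '\0')
//             break;
//     }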

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    event_clear_all();
#endif

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO)
        audio_object_flush(my_audio);
#endif

    embedded_value[EMBED_T] = 0; // reset echo for pronunciation announcements

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        SetParameter(i, saved_parameters[i], 0);

    return ENS_OK;
}

ESPEAK_API int espeak_IsPlaying(void)
{
#ifdef USE_ASYNC
    return fifo_is_busy();
#else
    return 0;
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
{
    espeak_ng_STATUS berr = err;
#ifdef USE_ASYNC
    while (espeak_IsPlaying())
        usleep(20000);
#endif
    err = ENS_OK;
    return berr;
}

extern void FreePhData(void);
extern void FreeVoiceList(void);

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Terminate(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    fifo_terminate();
    event_terminate();
#endif

    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        audio_object_close(my_audio);
        audio_object_destroy(my_audio);
        my_audio = NULL;
#endif
        out_samplerate = 0;
    }

    free(event_list);
    event_list = NULL;

    free(outbuf);
    outbuf = NULL;

    FreePhData();
    FreeVoiceList();

    translator = NULL;

    if (p_decoder != NULL) {
        destroy_text_decoder(p_decoder);
        p_decoder = NULL;
    }

    return ENS_OK;
}

ESPEAK_API const char *espeak_Info(const char **ptr)
{
    if (ptr != NULL)
        *ptr = path_home;
    return version_string;
}

#pragma GCC visibility pop