eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
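
A minimal sketch of how the public API implemented in speech.c below is typically driven. Error checking is omitted, and the callback name, the "en" voice and the espeakCHARS_UTF8 text flag are illustrative: they come from the wider espeak-ng headers and data, not from this file.

#include <espeak-ng/espeak_ng.h>
#include <string.h>

// Receives each buffer of synthesized samples together with its events.
static int on_synth(short *wav, int numsamples, espeak_EVENT *events)
{
    (void)wav; (void)numsamples; (void)events;
    return 0; // 0 = keep synthesizing, non-zero = stop
}

int main(void)
{
    espeak_ng_InitializePath(NULL);   // locate the espeak-ng-data directory
    espeak_ng_Initialize(NULL);       // load phoneme data and default parameters
    espeak_ng_InitializeOutput(ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS, 0, NULL);
    espeak_SetSynthCallback(on_synth);
    espeak_ng_SetVoiceByName("en");

    const char *text = "Hello from eSpeak NG.";
    espeak_ng_Synthesize(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                         espeakCHARS_UTF8, NULL, NULL);
    espeak_ng_Synchronize();          // wait until speech has finished
    espeak_ng_Terminate();
    return 0;
}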

speech.c 25KB

/*
 * Copyright (C) 2005 to 2013 by Jonathan Duddington
 * email: [email protected]
 * Copyright (C) 2013-2017 Reece H. Dunn
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see: <http://www.gnu.org/licenses/>.
 */

#include "config.h"

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <locale.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#include <wchar.h>

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
#include <pcaudiolib/audio.h>
#endif

#if defined(_WIN32) || defined(_WIN64)
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#include <winreg.h>
#endif

#include <espeak-ng/espeak_ng.h>
#include <espeak-ng/speak_lib.h>
#include <espeak-ng/encoding.h>

#include "dictionary.h"
#include "mbrola.h"
#include "readclause.h"
#include "synthdata.h"
#include "wavegen.h"

#include "speech.h"
#include "phoneme.h"
#include "voice.h"
#include "synthesize.h"
#include "translate.h"
#include "espeak_command.h"
#include "fifo.h"
#include "event.h"

unsigned char *outbuf = NULL;
int outbuf_size = 0;

espeak_EVENT *event_list = NULL;
int event_list_ix = 0;
int n_event_list;
long count_samples;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
struct audio_object *my_audio = NULL;
#endif

static const char *option_device = NULL;
static unsigned int my_unique_identifier = 0;
static void *my_user_data = NULL;
static espeak_ng_OUTPUT_MODE my_mode = ENOUTPUT_MODE_SYNCHRONOUS;
static int out_samplerate = 0;
static int voice_samplerate = 22050;
static espeak_ng_STATUS err = ENS_OK;

t_espeak_callback *synth_callback = NULL;
int (*uri_callback)(int, const char *, const char *) = NULL;
int (*phoneme_callback)(const char *) = NULL;

char path_home[N_PATH_HOME]; // this is the espeak-ng-data directory

extern int saved_parameters[N_SPEECH_PARAM]; // Parameters saved on synthesis start

void cancel_audio(void)
{
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
        audio_object_flush(my_audio);
    }
#endif
}
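
// Deliver one buffer of synthesized samples: open (or re-open) the audio device when the
// voice sample rate changes, write the samples, and hand any pending events to the async
// event queue or the user callback. Returns 0 to continue, 1 to stop synthesis, or -1 on
// an audio error.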
static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
{
    int a_wave_can_be_played = 1;
#ifdef USE_ASYNC
    if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
        a_wave_can_be_played = fifo_is_command_enabled();
#endif

    switch ((int)my_mode)
    {
    case ENOUTPUT_MODE_SPEAK_AUDIO:
    case ENOUTPUT_MODE_SPEAK_AUDIO | ENOUTPUT_MODE_SYNCHRONOUS:
    {
        int event_type = 0;
        if (event)
            event_type = event->type;

        if (event_type == espeakEVENT_SAMPLERATE) {
            voice_samplerate = event->id.number;

            if (out_samplerate != voice_samplerate) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                if (out_samplerate != 0) {
                    // sound was previously open with a different sample rate
                    audio_object_close(my_audio);
                    out_samplerate = 0;
#ifdef HAVE_SLEEP
                    sleep(1);
#endif
                }
#endif
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
                int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
                if (error != 0) {
                    fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
                    err = ENS_AUDIO_ERROR;
                    return -1;
                }
#endif
                out_samplerate = voice_samplerate;
#ifdef USE_ASYNC
                if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
                    event_init();
#endif
            }
        }

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        if (out_samplerate == 0) {
            int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
            if (error != 0) {
                fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
                err = ENS_AUDIO_ERROR;
                return -1;
            }
            out_samplerate = voice_samplerate;
        }
#endif

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        if (outbuf && length && a_wave_can_be_played) {
            int error = audio_object_write(my_audio, (char *)outbuf, 2*length);
            if (error != 0)
                fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
        }
#endif

#ifdef USE_ASYNC
        while (event && a_wave_can_be_played) {
            // TBD: some events are filtered here, but some insight might be gained
            // TBD: in synthesise.cpp for avoiding the creation of WORDs with size=0.
            // TBD: For example the sentence "or ALT)." returns three words
            // "or", "ALT" and "".
            // TBD: the last one has its size=0.
            if ((event->type == espeakEVENT_WORD) && (event->length == 0))
                break;

            if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0) {
                err = event_declare(event);
                if (err != ENS_EVENT_BUFFER_FULL)
                    break;
                usleep(10000);
                a_wave_can_be_played = fifo_is_command_enabled();
            } else
                break;
        }
#endif
    }
        break;
    case 0:
        if (synth_callback)
            synth_callback(outbuf, length, event);
        break;
    }

    return a_wave_can_be_played == 0; // 1 = stop synthesis, -1 = error
}
static int create_events(short *outbuf, int length, espeak_EVENT *event_list)
{
    int finished;
    int i = 0;

    // The audio data are written to the output device.
    // The list of events in event_list (index: event_list_ix) is read:
    // Each event is declared to the "event" object which stores them internally.
    // The event object is responsible for calling the external callback
    // as soon as the relevant audio sample is played.

    do { // for each event
        espeak_EVENT *event;
        if (event_list_ix == 0)
            event = NULL;
        else
            event = event_list + i;
        finished = dispatch_audio((short *)outbuf, length, event);
        length = 0; // the wave data are played once.
        i++;
    } while ((i < event_list_ix) && !finished);
    return finished;
}

#ifdef USE_ASYNC
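
// Report the end of a synthesis request: queue an espeakEVENT_MSG_TERMINATED event for
// audio playback mode, or deliver it directly through the synthesis callback.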
int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
{
    int finished = 0;

    memset(event_list, 0, 2*sizeof(espeak_EVENT));

    event_list[0].type = espeakEVENT_MSG_TERMINATED;
    event_list[0].unique_identifier = unique_identifier;
    event_list[0].user_data = user_data;
    event_list[1].type = espeakEVENT_LIST_TERMINATED;
    event_list[1].unique_identifier = unique_identifier;
    event_list[1].user_data = user_data;

    if (my_mode == ENOUTPUT_MODE_SPEAK_AUDIO) {
        while (1) {
            err = event_declare(event_list);
            if (err != ENS_EVENT_BUFFER_FULL)
                break;
            usleep(10000);
        }
    } else if (synth_callback)
        finished = synth_callback(NULL, 0, event_list);
    return finished;
}

#endif
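
// Record <path>/espeak-ng-data (or the path itself, when allow_directory is set) in
// path_home if it exists as a directory. Returns 1 on success, 0 otherwise.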
static int check_data_path(const char *path, int allow_directory)
{
    if (!path) return 0;

    snprintf(path_home, sizeof(path_home), "%s/espeak-ng-data", path);
    if (GetFileLength(path_home) == -EISDIR)
        return 1;

    if (!allow_directory)
        return 0;

    snprintf(path_home, sizeof(path_home), "%s", path);
    return GetFileLength(path_home) == -EISDIR;
}

#pragma GCC visibility push(default)

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_InitializeOutput(espeak_ng_OUTPUT_MODE output_mode, int buffer_length, const char *device)
{
    option_device = device;
    my_mode = output_mode;
    out_samplerate = 0;

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if (my_audio == NULL)
        my_audio = create_audio_device_object(device, "eSpeak", "Text-to-Speech");
#endif

    // buffer_length is in mS, allocate 2 bytes per sample
    if (buffer_length == 0)
        buffer_length = 60;

    outbuf_size = (buffer_length * samplerate)/500;
    out_start = (unsigned char *)realloc(outbuf, outbuf_size);
    if (out_start == NULL)
        return ENOMEM;
    else
        outbuf = out_start;

    // allocate space for event list. Allow 200 events per second.
    // Add a constant to allow for very small buffer_length
    n_event_list = (buffer_length*200)/1000 + 20;
    espeak_EVENT *new_event_list = (espeak_EVENT *)realloc(event_list, sizeof(espeak_EVENT) * n_event_list);
    if (new_event_list == NULL)
        return ENOMEM;
    event_list = new_event_list;

    return ENS_OK;
}
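
// Return the size of a file in bytes, -EISDIR if it is a directory, or -errno on error.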
int GetFileLength(const char *filename)
{
    struct stat statbuf;

    if (stat(filename, &statbuf) != 0)
        return -errno;

    if (S_ISDIR(statbuf.st_mode))
        return -EISDIR;

    return statbuf.st_size;
}

ESPEAK_NG_API void espeak_ng_InitializePath(const char *path)
{
    if (check_data_path(path, 1))
        return;

#ifdef PLATFORM_WINDOWS
    HKEY RegKey;
    unsigned long size;
    unsigned long var_type;
    unsigned char buf[sizeof(path_home)-13];

    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    buf[0] = 0;
    RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\eSpeak NG", 0, KEY_READ, &RegKey);
    if (RegKey == NULL)
        RegOpenKeyExA(HKEY_LOCAL_MACHINE, "Software\\WOW6432Node\\eSpeak NG", 0, KEY_READ, &RegKey);
    size = sizeof(buf);
    var_type = REG_SZ;
    RegQueryValueExA(RegKey, "Path", 0, &var_type, buf, &size);

    if (check_data_path(buf, 1))
        return;
#elif !defined(PLATFORM_DOS)
    if (check_data_path(getenv("ESPEAK_DATA_PATH"), 1))
        return;

    if (check_data_path(getenv("HOME"), 0))
        return;
#endif

    strcpy(path_home, PATH_ESPEAK_DATA);
}

const int param_defaults[N_SPEECH_PARAM] = {
    0,                 // silence (internal use)
    espeakRATE_NORMAL, // rate wpm
    100,               // volume
    50,                // pitch
    50,                // range
    0,                 // punctuation
    0,                 // capital letters
    0,                 // wordgap
    0,                 // options
    0,                 // intonation
    0,
    0,
    0,                 // emphasis
    0,                 // line length
    0,                 // voice type
};

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Initialize(espeak_ng_ERROR_CONTEXT *context)
{
    int param;
    int srate = 22050; // default sample rate 22050 Hz

    // It seems that the wctype functions don't work until the locale has been set
    // to something other than the default "C". Then, not only Latin1 but also the
    // other characters give the correct results with iswalpha() etc.
    if (setlocale(LC_CTYPE, "C.UTF-8") == NULL) {
        if (setlocale(LC_CTYPE, "UTF-8") == NULL) {
            if (setlocale(LC_CTYPE, "en_US.UTF-8") == NULL)
                setlocale(LC_CTYPE, "");
        }
    }

    espeak_ng_STATUS result = LoadPhData(&srate, context);
    if (result != ENS_OK)
        return result;

    WavegenInit(srate, 0);
    LoadConfig();

    memset(&current_voice_selected, 0, sizeof(current_voice_selected));
    SetVoiceStack(NULL, "");
    SynthesizeInit();
    InitNamedata();

    VoiceReset(0);

    for (param = 0; param < N_SPEECH_PARAM; param++)
        param_stack[0].parameter[param] = saved_parameters[param] = param_defaults[param];

    SetParameter(espeakRATE, espeakRATE_NORMAL, 0);
    SetParameter(espeakVOLUME, 100, 0);
    SetParameter(espeakCAPITALS, option_capitals, 0);
    SetParameter(espeakPUNCTUATION, option_punctuation, 0);
    SetParameter(espeakWORDGAP, 0, 0);

#ifdef USE_ASYNC
    fifo_init();
#endif

    option_phonemes = 0;
    option_phoneme_events = 0;

    return ENS_OK;
}

ESPEAK_NG_API int espeak_ng_GetSampleRate(void)
{
    return samplerate;
}

#pragma GCC visibility pop
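
// Main synthesis loop: decode the input text, then repeatedly fill outbuf with samples and
// dispatch each buffer (plus its events) until all clauses have been spoken or a callback
// asks synthesis to stop.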
static espeak_ng_STATUS Synthesize(unsigned int unique_identifier, const void *text, int flags)
{
    // Fill the buffer with output sound
    int length;
    int finished = 0;
    int count_buffers = 0;

    if ((outbuf == NULL) || (event_list == NULL))
        return ENS_NOT_INITIALIZED;

    option_ssml = flags & espeakSSML;
    option_phoneme_input = flags & espeakPHONEMES;
    option_endpause = flags & espeakENDPAUSE;
    count_samples = 0;

    espeak_ng_STATUS status;
    if (translator == NULL) {
        status = espeak_ng_SetVoiceByName("en");
        if (status != ENS_OK)
            return status;
    }

    if (p_decoder == NULL)
        p_decoder = create_text_decoder();

    status = text_decoder_decode_string_multibyte(p_decoder, text, translator->encoding, flags);
    if (status != ENS_OK)
        return status;

    SpeakNextClause(0);

    for (;;) {
        out_ptr = outbuf;
        out_end = &outbuf[outbuf_size];
        event_list_ix = 0;
        WavegenFill();

        length = (out_ptr - outbuf)/2;
        count_samples += length;
        event_list[event_list_ix].type = espeakEVENT_LIST_TERMINATED; // indicates end of event list
        event_list[event_list_ix].unique_identifier = unique_identifier;
        event_list[event_list_ix].user_data = my_user_data;

        count_buffers++;
        if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
            finished = create_events((short *)outbuf, length, event_list);
            if (finished < 0)
                return ENS_AUDIO_ERROR;
        } else if (synth_callback)
            finished = synth_callback((short *)outbuf, length, event_list);
        if (finished) {
            SpeakNextClause(2); // stop
            return ENS_SPEECH_STOPPED;
        }

        if (Generate(phoneme_list, &n_phoneme_list, 1) == 0) {
            if (WcmdqUsed() == 0) {
                // don't process the next clause until the previous clause has finished generating speech.
                // This ensures that the <audio> tag (which causes end-of-clause) is at a sound buffer boundary
                event_list[0].type = espeakEVENT_LIST_TERMINATED;
                event_list[0].unique_identifier = my_unique_identifier;
                event_list[0].user_data = my_user_data;

                if (SpeakNextClause(1) == 0) {
                    finished = 0;
                    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
                        if (dispatch_audio(NULL, 0, NULL) < 0)
                            return ENS_AUDIO_ERROR;
                    } else if (synth_callback)
                        finished = synth_callback(NULL, 0, event_list); // NULL buffer ptr indicates end of data
                    if (finished) {
                        SpeakNextClause(2); // stop
                        return ENS_SPEECH_STOPPED;
                    }
                    return ENS_OK;
                }
            }
        }
    }
}

void MarkerEvent(int type, unsigned int char_position, int value, int value2, unsigned char *out_ptr)
{
    // type: 1=word, 2=sentence, 3=named mark, 4=play audio, 5=end, 7=phoneme
    espeak_EVENT *ep;
    double time;

    if ((event_list == NULL) || (event_list_ix >= (n_event_list-2)))
        return;

    ep = &event_list[event_list_ix++];
    ep->type = (espeak_EVENT_TYPE)type;
    ep->unique_identifier = my_unique_identifier;
    ep->user_data = my_user_data;
    ep->text_position = char_position & 0xffffff;
    ep->length = char_position >> 24;

    time = ((double)(count_samples + mbrola_delay + (out_ptr - out_start)/2)*1000.0)/samplerate;
    ep->audio_position = (int)time;
    ep->sample = (count_samples + mbrola_delay + (out_ptr - out_start)/2);

    if ((type == espeakEVENT_MARK) || (type == espeakEVENT_PLAY))
        ep->id.name = &namedata[value];
    else if (type == espeakEVENT_PHONEME) {
        int *p;
        p = (int *)(ep->id.string);
        p[0] = value;
        p[1] = value2;
    } else
        ep->id.number = value;
}
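
// Synchronous synthesis entry point: apply any skip position, run Synthesize(), and then
// drain (or flush, if synthesis was stopped) the audio device.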
espeak_ng_STATUS sync_espeak_Synth(unsigned int unique_identifier, const void *text,
                                   unsigned int position, espeak_POSITION_TYPE position_type,
                                   unsigned int end_position, unsigned int flags, void *user_data)
{
    InitText(flags);
    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        saved_parameters[i] = param_stack[0].parameter[i];

    switch (position_type)
    {
    case POS_CHARACTER:
        skip_characters = position;
        break;
    case POS_WORD:
        skip_words = position;
        break;
    case POS_SENTENCE:
        skip_sentences = position;
        break;
    }
    if (skip_characters || skip_words || skip_sentences)
        skipping_text = true;

    end_character_position = end_position;

    espeak_ng_STATUS aStatus = Synthesize(unique_identifier, text, flags);
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
        int error = (aStatus == ENS_SPEECH_STOPPED)
                  ? audio_object_flush(my_audio)
                  : audio_object_drain(my_audio);
        if (error != 0)
            fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
    }
#endif

    return aStatus;
}

espeak_ng_STATUS sync_espeak_Synth_Mark(unsigned int unique_identifier, const void *text,
                                        const char *index_mark, unsigned int end_position,
                                        unsigned int flags, void *user_data)
{
    InitText(flags);

    my_unique_identifier = unique_identifier;
    my_user_data = user_data;

    if (index_mark != NULL) {
        strncpy0(skip_marker, index_mark, sizeof(skip_marker));
        skipping_text = true;
    }

    end_character_position = end_position;

    return Synthesize(unique_identifier, text, flags | espeakSSML);
}

espeak_ng_STATUS sync_espeak_Key(const char *key)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    int letter;
    int ix;

    ix = utf8_in(&letter, key);
    if (key[ix] == 0) // a single character
        return sync_espeak_Char(letter);

    my_unique_identifier = 0;
    my_user_data = NULL;
    return Synthesize(0, key, 0); // speak key as a text string
}

espeak_ng_STATUS sync_espeak_Char(wchar_t character)
{
    // is there a system resource of character names per language?
    char buf[80];
    my_unique_identifier = 0;
    my_user_data = NULL;

    sprintf(buf, "<say-as interpret-as=\"tts:char\">&#%d;</say-as>", character);
    return Synthesize(0, buf, espeakSSML);
}

void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation characters that are spoken for "some".
    my_unique_identifier = 0;
    my_user_data = NULL;

    option_punctlist[0] = 0;
    if (punctlist != NULL) {
        wcsncpy(option_punctlist, punctlist, N_PUNCTLIST);
        option_punctlist[N_PUNCTLIST-1] = 0;
    }
}

#pragma GCC visibility push(default)

ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback *SynthCallback)
{
    synth_callback = SynthCallback;
#ifdef USE_ASYNC
    event_set_callback(synth_callback);
#endif
}

ESPEAK_API void espeak_SetUriCallback(int (*UriCallback)(int, const char *, const char *))
{
    uri_callback = UriCallback;
}

ESPEAK_API void espeak_SetPhonemeCallback(int (*PhonemeCallback)(const char *))
{
    phoneme_callback = PhonemeCallback;
}
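
// Queue text for synthesis. In synchronous mode the text is spoken before returning;
// otherwise a text command and a "terminated" message are added to the async fifo.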
ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_Synthesize(const void *text, size_t size,
                     unsigned int position,
                     espeak_POSITION_TYPE position_type,
                     unsigned int end_position, unsigned int flags,
                     unsigned int *unique_identifier, void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the text command
    t_espeak_command *c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_text.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS
espeak_ng_SynthesizeMark(const void *text,
                         size_t size,
                         const char *index_mark,
                         unsigned int end_position,
                         unsigned int flags,
                         unsigned int *unique_identifier,
                         void *user_data)
{
    (void)size; // unused in non-async modes

    static unsigned int temp_identifier;

    if (unique_identifier == NULL)
        unique_identifier = &temp_identifier;
    *unique_identifier = 0;

    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);

#ifdef USE_ASYNC
    // Create the mark command
    t_espeak_command *c1 = create_espeak_mark(text, size, index_mark, end_position,
                                              flags, user_data);
    if (c1) {
        // Retrieve the unique identifier
        *unique_identifier = c1->u.my_mark.unique_identifier;
    }

    // Create the "terminated msg" command (same uid)
    t_espeak_command *c2 = create_espeak_terminated_msg(*unique_identifier, user_data);

    // Try to add these 2 commands (single transaction)
    if (c1 && c2) {
        espeak_ng_STATUS status = fifo_add_commands(c1, c2);
        if (status != ENS_OK) {
            delete_espeak_command(c1);
            delete_espeak_command(c2);
        }
        return status;
    }

    delete_espeak_command(c1);
    delete_espeak_command(c2);
    return ENOMEM;
#else
    return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakKeyName(const char *key_name)
{
    // symbolic name, symbolicname_character - is there a system resource of symbolic names per language?
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Key(key_name);

#ifdef USE_ASYNC
    t_espeak_command *c = create_espeak_key(key_name, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Key(key_name);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakCharacter(wchar_t character)
{
    // is there a system resource of character names per language?
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return sync_espeak_Char(character);

    t_espeak_command *c = create_espeak_char(character, NULL);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return sync_espeak_Char(character);
#endif
}

ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
{
    // current: 0=default value, 1=current value
    if (current)
        return param_stack[0].parameter[parameter];
    return param_defaults[parameter];
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetParameter(espeak_PARAMETER parameter, int value, int relative)
{
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
        return SetParameter(parameter, value, relative);

    t_espeak_command *c = create_espeak_parameter(parameter, value, relative);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    return SetParameter(parameter, value, relative);
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetPunctuationList(const wchar_t *punctlist)
{
    // Set the list of punctuation characters that are spoken for "some".
#ifdef USE_ASYNC
    if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS) {
        sync_espeak_SetPunctuationList(punctlist);
        return ENS_OK;
    }

    t_espeak_command *c = create_espeak_punctuation_list(punctlist);
    espeak_ng_STATUS status = fifo_add_command(c);
    if (status != ENS_OK)
        delete_espeak_command(c);
    return status;
#else
    sync_espeak_SetPunctuationList(punctlist);
    return ENS_OK;
#endif
}

ESPEAK_API void espeak_SetPhonemeTrace(int phonememode, FILE *stream)
{
    /* phonememode: Controls the output of phoneme symbols for the text
          bits 0-2:
             value=0  No phoneme output (default)
             value=1  Output the translated phoneme symbols for the text
             value=2  as (1), but produces IPA phoneme names rather than ascii
          bit 3:   output a trace of how the translation was done (showing the matching rules and list entries)
          bit 4:   produce pho data for mbrola
          bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
          bits 8-23:  separator character, between phoneme names

       stream   output stream for the phoneme symbols (and trace). If stream=NULL then it uses stderr.
    */
    option_phonemes = phonememode;
    f_trans = stream;
    if (stream == NULL)
        f_trans = stderr;
}

ESPEAK_API const char *espeak_TextToPhonemes(const void **textptr, int textmode, int phonememode)
{
    /* phonememode
          bit 1:   0=eSpeak's ascii phoneme names, 1= International Phonetic Alphabet (as UTF-8 characters).
          bit 7:   use (bits 8-23) as a tie within multi-letter phoneme names
          bits 8-23:  separator character, between phoneme names
    */
    if (p_decoder == NULL)
        p_decoder = create_text_decoder();

    if (text_decoder_decode_string_multibyte(p_decoder, *textptr, translator->encoding, textmode) != ENS_OK)
        return NULL;

    TranslateClause(translator, NULL, NULL);
    *textptr = text_decoder_get_buffer(p_decoder);

    return GetTranslatedPhonemeString(phonememode);
}
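
// Stop any synthesis in progress: clear the async queues, flush the audio device and
// restore the parameters that were saved when synthesis started.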
ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    event_clear_all();
#endif

#ifdef HAVE_PCAUDIOLIB_AUDIO_H
    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO)
        audio_object_flush(my_audio);
#endif

    embedded_value[EMBED_T] = 0; // reset echo for pronunciation announcements

    for (int i = 0; i < N_SPEECH_PARAM; i++)
        SetParameter(i, saved_parameters[i], 0);

    return ENS_OK;
}

ESPEAK_API int espeak_IsPlaying(void)
{
#ifdef USE_ASYNC
    return fifo_is_busy();
#else
    return 0;
#endif
}

ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
{
    espeak_ng_STATUS berr = err;
#ifdef USE_ASYNC
    while (espeak_IsPlaying())
        usleep(20000);
#endif
    err = ENS_OK;
    return berr;
}
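
// Shut down the library: stop the async threads, close the audio device and free the
// sample and event buffers, voice list, translator and text decoder.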
ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Terminate(void)
{
#ifdef USE_ASYNC
    fifo_stop();
    fifo_terminate();
    event_terminate();
#endif

    if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
#ifdef HAVE_PCAUDIOLIB_AUDIO_H
        audio_object_close(my_audio);
        audio_object_destroy(my_audio);
        my_audio = NULL;
#endif
        out_samplerate = 0;
    }

    free(event_list);
    event_list = NULL;

    free(outbuf);
    outbuf = NULL;

    FreePhData();
    FreeVoiceList();

    DeleteTranslator(translator);
    translator = NULL;

    if (p_decoder != NULL) {
        destroy_text_decoder(p_decoder);
        p_decoder = NULL;
    }

    return ENS_OK;
}

const char *version_string = PACKAGE_VERSION;

ESPEAK_API const char *espeak_Info(const char **ptr)
{
    if (ptr != NULL)
        *ptr = path_home;
    return version_string;
}

#pragma GCC visibility pop