eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ttsengobj.cpp 23KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928
  1. /***************************************************************************
  2. * Copyright (C) 2005 to 2007 by Jonathan Duddington *
  3. * email: [email protected] *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 3 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write see: *
  17. * <http://www.gnu.org/licenses/>. *
  18. ***************************************************************************/
  19. #include "stdafx.h"
  20. #include "TtsEngObj.h"
  21. #include "src/speak_lib.h"
  22. #include "stdio.h"
#define CTRL_EMBEDDED 1    // control character which introduces an embedded espeak command

// File-scope engine state (the engine behaves as a singleton: the espeak
// SynthCallback has no user-data argument, so it reaches the engine here).
CTTSEngObj *m_EngObj;            // current engine instance (set in FinalConstruct)
ISpTTSEngineSite* m_OutputSite;  // SAPI output site for the Speak() call in progress
FILE *f_log2=NULL;               // debug log file (only opened under LOG_DEBUG)
ULONGLONG event_interest;        // event-interest mask from GetEventInterest()

extern int AddNameData(const char *name, int wide);
extern void InitNamedata(void);

int master_volume = 100;         // volume from ISpTTSEngineSite::GetVolume
int master_rate = 0;             // rate from ISpTTSEngineSite::GetRate
int initialised = 0;             // set once espeak_Initialize() has been called
int gVolume = 100;               // last volume sent to espeak as an embedded command
int gSpeed = -1;                 // last speed sent (-1 = not yet sent)
int gPitch = -1;                 // last pitch sent
int gRange = -1;                 // last pitch-range sent
int gEmphasis = 0;               // last emphasis sent
int gSayas = 0;                  // last say-as mode sent
char g_voice_name[80];           // name of the currently selected espeak voice
char *path_install = NULL;       // installation path read from the registry token
unsigned long audio_offset = 0;  // ms offset of the current fragment in the output stream
unsigned long audio_latest = 0;  // ms position of the most recent espeak event
int prev_phoneme = 0;            // previous phoneme code (for viseme events)
int prev_phoneme_position = 0;   // its audio position (ms)
unsigned long prev_phoneme_time = 0;   // its stream time (ms)
unsigned int gBufSize = 0;       // allocated size in bytes of TextBuf
wchar_t *TextBuf=NULL;           // text buffer handed to espeak_Synth()

// Maps a position in the espeak text buffer (TextBuf) back to the SAPI
// source text, so word events can report source offsets.
typedef struct {
	unsigned int bufix;    // index in TextBuf where the fragment's text starts
	unsigned int textix;   // ulTextSrcOffset of the fragment
	unsigned int cmdlen;   // length of embedded commands preceding the text
} FRAG_OFFSET;

int srate; // samplerate, Hz/50
int n_frag_offsets = 0;          // allocated entries in frag_offsets
int frag_ix = 0;                 // fragment being scanned by SynthCallback
int frag_count=0;                // fragments in the current Speak() request
FRAG_OFFSET *frag_offsets = NULL;
  58. //#define TEST_INPUT // printf input text received from SAPI to espeak_text_log.txt
  59. #ifdef TEST_INPUT
  60. static int utf8_out(unsigned int c, char *buf)
  61. {//====================================
  62. // write a unicode character into a buffer as utf8
  63. // returns the number of bytes written
  64. int n_bytes;
  65. int j;
  66. int shift;
  67. static char unsigned code[4] = {0,0xc0,0xe0,0xf0};
  68. if(c < 0x80)
  69. {
  70. buf[0] = c;
  71. return(1);
  72. }
  73. if(c >= 0x110000)
  74. {
  75. buf[0] = ' '; // out of range character code
  76. return(1);
  77. }
  78. if(c < 0x0800)
  79. n_bytes = 1;
  80. else
  81. if(c < 0x10000)
  82. n_bytes = 2;
  83. else
  84. n_bytes = 3;
  85. shift = 6*n_bytes;
  86. buf[0] = code[n_bytes] | (c >> shift);
  87. for(j=0; j<n_bytes; j++)
  88. {
  89. shift -= 6;
  90. buf[j+1] = 0x80 + ((c >> shift) & 0x3f);
  91. }
  92. return(n_bytes+1);
  93. } // end of utf8_out
  94. #endif
  95. int VisemeCode(unsigned int phoneme_name)
  96. {//======================================
  97. // Convert eSpeak phoneme name into a SAPI viseme code
  98. int ix;
  99. unsigned int ph;
  100. unsigned int ph_name;
  101. #define PH(c1,c2) (c2<<8)+c1 // combine two characters into an integer for phoneme name
  102. const unsigned char initial_to_viseme[128] = {
  103. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  104. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  105. 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,19, 0, 0, 0, 0, 0,
  106. 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,255,
  107. 4, 2,18,16,17, 4,18,20,12, 6,16,20,14,21,20, 3,
  108. 21,20,13,16,17, 4, 1, 5,20, 7,16, 0, 0, 0, 0, 0,
  109. 0, 1,21,16,19, 4,18,20,12, 6, 6,20,14,21,19, 8,
  110. 21,20,13,15,19, 7,18, 7,20, 7,15, 0, 0, 0, 0, 0 };
  111. const unsigned int viseme_exceptions[] = {
  112. PH('a','I'), 11,
  113. PH('a','U'), 9,
  114. PH('O','I'), 10,
  115. PH('t','S'), 16,
  116. PH('d','Z'), 16,
  117. PH('_','|'), 255,
  118. 0
  119. };
  120. ph_name = phoneme_name & 0xffff;
  121. for(ix=0; (ph = viseme_exceptions[ix]) != 0; ix+=2)
  122. {
  123. if(ph == ph_name)
  124. {
  125. return(viseme_exceptions[ix+1]);
  126. }
  127. }
  128. return(initial_to_viseme[phoneme_name & 0x7f]);
  129. }
  130. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);
  131. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
  132. {//================================================================
  133. int hr;
  134. wchar_t *tailptr;
  135. unsigned int text_offset;
  136. int length;
  137. int phoneme_duration;
  138. int this_viseme;
  139. espeak_EVENT *event;
  140. #define N_EVENTS 100
  141. int n_Events = 0;
  142. SPEVENT *Event;
  143. SPEVENT Events[N_EVENTS];
  144. if(m_OutputSite->GetActions() & SPVES_ABORT)
  145. return(1);
  146. m_EngObj->CheckActions(m_OutputSite);
  147. // return the events
  148. for(event=events; (event->type != 0) && (n_Events < N_EVENTS); event++)
  149. {
  150. audio_latest = event->audio_position + audio_offset;
  151. if((event->type == espeakEVENT_WORD) && (event->length > 0))
  152. {
  153. while(((frag_ix+1) < frag_count) &&
  154. ((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
  155. {
  156. frag_ix++;
  157. }
  158. text_offset = frag_offsets[frag_ix].textix +
  159. event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
  160. length = event->length - frag_offsets[frag_ix].cmdlen;
  161. frag_offsets[frag_ix].cmdlen = 0;
  162. if(text_offset < 0)
  163. text_offset = 0;
  164. Event = &Events[n_Events++];
  165. Event->eEventId = SPEI_WORD_BOUNDARY;
  166. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  167. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  168. Event->lParam = text_offset;
  169. Event->wParam = length;
  170. }
  171. if(event->type == espeakEVENT_MARK)
  172. {
  173. Event = &Events[n_Events++];
  174. Event->eEventId = SPEI_TTS_BOOKMARK;
  175. Event->elParamType = SPET_LPARAM_IS_STRING;
  176. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  177. Event->lParam = (long)event->id.name;
  178. Event->wParam = wcstol((wchar_t *)event->id.name,&tailptr,10);
  179. }
  180. if(event->type == espeakEVENT_PHONEME)
  181. {
  182. if(event_interest & SPEI_VISEME)
  183. {
  184. phoneme_duration = audio_latest - prev_phoneme_time;
  185. // ignore some phonemes (which translate to viseme=255)
  186. if((this_viseme = VisemeCode(event->id.number)) != 255)
  187. {
  188. Event = &Events[n_Events++];
  189. Event->eEventId = SPEI_VISEME;
  190. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  191. Event->ullAudioStreamOffset = ((prev_phoneme_position + audio_offset) * srate)/10; // ms -> bytes
  192. Event->lParam = phoneme_duration << 16 | this_viseme;
  193. Event->wParam = VisemeCode(prev_phoneme);
  194. prev_phoneme = event->id.number;
  195. prev_phoneme_time = audio_latest;
  196. prev_phoneme_position = event->audio_position;
  197. }
  198. }
  199. }
  200. #ifdef deleted
  201. if(event->type == espeakEVENT_SENTENCE)
  202. {
  203. Event = &Events[n_Events++];
  204. Event->eEventId = SPEI_SENTENCE_BOUNDARY;
  205. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  206. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  207. Event->lParam = 0;
  208. Event->wParam = 0; // TEMP
  209. }
  210. #endif
  211. }
  212. if(n_Events > 0)
  213. m_OutputSite->AddEvents(Events, n_Events );
  214. // return the sound data
  215. hr = m_OutputSite->Write(wav, numsamples*2, NULL);
  216. return(hr);
  217. }
  218. static int ConvertRate(int new_rate)
  219. {//=================================
  220. int rate;
  221. static int rate_table[21] = {80,100,115,124,133,142,151,159,168,174,180,
  222. 187,196,208,220,240,270,300,335,369,390 };
  223. rate = new_rate + master_rate;
  224. if(rate < -10) rate = -10;
  225. if(rate > 10) rate = 10;
  226. return(rate_table[rate+10]);
  227. } // end of ConvertRate
  228. static int ConvertPitch(int pitch)
  229. {//===============================
  230. static int pitch_table[41] =
  231. {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
  232. 54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
  233. // {0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
  234. // 53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};
  235. if(pitch < -20) pitch = -20;
  236. if(pitch > 20) pitch = 20;
  237. return(pitch_table[pitch+20]);
  238. }
  239. static int ConvertRange(int range)
  240. {//===============================
  241. static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};
  242. if(range < -10) range = -10;
  243. if(range > 10) range = 10;
  244. return(range_table[range+10]/2);
  245. }
HRESULT CTTSEngObj::FinalConstruct()
{//=================================
// ATL final construction: initialise per-instance members and record
// this instance in the file-scope m_EngObj pointer, which the static
// espeak SynthCallback uses (the callback has no user-data argument).
	SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
	HRESULT hr = S_OK;

#ifdef LOG_DEBUG
	f_log2=fopen("C:\\log_espeak","a");
	if(f_log2) fprintf(f_log2,"\n****\n");
#endif

	//--- Init vars
	m_hVoiceData = NULL;
	m_pVoiceData = NULL;
	m_pWordList = NULL;
	m_ulNumWords = 0;

	m_EngObj = this;
	return hr;
} /* CTTSEngObj::FinalConstruct */
void CTTSEngObj::FinalRelease()
{//============================
// ATL final release: free the word list, close the debug log, and
// unmap/close the voice-data file mapping if one was opened.
	SPDBG_FUNC( "CTTSEngObj::FinalRelease" );
	delete m_pWordList;

#ifdef LOG_DEBUG
	if(f_log2!=NULL) fclose(f_log2);
#endif

	if( m_pVoiceData )
	{
		::UnmapViewOfFile( (void*)m_pVoiceData );
	}
	if( m_hVoiceData )
	{
		::CloseHandle( m_hVoiceData );
	}
} /* CTTSEngObj::FinalRelease */
  278. //
  279. //=== ISpObjectWithToken Implementation ======================================
  280. //
  281. void WcharToChar(char *out, const wchar_t *in, int len)
  282. {//====================================================
  283. int ix;
  284. for(ix=0; ix<len; ix++)
  285. {
  286. if((out[ix] = (char)in[ix]) == 0)
  287. break;
  288. }
  289. out[len-1] = 0;
  290. }
  291. /*****************************************************************************
  292. * CTTSEngObj::SetObjectToken *
  293. *----------------------------*
  294. * Description:
  295. * Read the "VoiceName" attribute from the registry, and use it to select
  296. * an eSpeak voice file
  297. *****************************************************************************/
STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
{
	// voice_name is presumably a char-array member of CTTSEngObj -- TODO confirm
	strcpy(voice_name,"default");
	SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
	HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);

	if( SUCCEEDED( hr ) )
	{
		CSpDynamicString voicename;
		CSpDynamicString path;
		HRESULT hr2;
		int len;

		// "VoiceName" registry attribute selects the espeak voice
		hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
		if( SUCCEEDED(hr2) )
		{
			WcharToChar(voice_name,voicename,sizeof(voice_name));
		}

		// "Path" registry attribute gives the espeak-data installation directory
		hr2 = m_cpToken->GetStringValue( L"Path", &path);
		if( SUCCEEDED(hr2) )
		{
			len = wcslen(path)+1;
			// NOTE(review): path_install is never freed; this leaks if the
			// method is called more than once
			path_install = (char *)malloc(len);
			WcharToChar(path_install,path,len);
		}
	}

	// reset the cached embedded-command state so the first fragment of the
	// next Speak() re-sends volume/rate/pitch to espeak
	gVolume = 100;
	gSpeed = -1;
	gPitch = -1;
	gRange = -1;
	gEmphasis = 0;
	gSayas = 0;

	if(initialised==0)
	{
		espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,1);
		espeak_SetSynthCallback(SynthCallback);
		initialised = 1;
		// g_voice_name[0] = 0;
	}

	strcpy(g_voice_name, voice_name);
	espeak_SetVoiceByName(g_voice_name);
	return hr;
} /* CTTSEngObj::SetObjectToken */
  339. //
  340. //=== ISpTTSEngine Implementation ============================================
  341. //
#define L(c1,c2) (c1<<8)+c2 // combine two characters into an integer

// SAPI phoneme id (array index) -> eSpeak phoneme mnemonic, for English.
// Index 3 is the word separator; index 8 is primary stress and index 9
// secondary stress (WritePhonemes tests for '\'' and ',').  NULL entries
// are ignored.
static char *phoneme_names_en[] = {
	NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
	"A:","a","V","0","aU","@","aI",
	"b","tS","d","D","E","3:","eI",
	"f","g","h","I","i:","dZ","k",
	"l","m","n","N","oU","OI","p",
	"r","s","S","t","T","U","u:",
	"v","w","j","z","Z",
	NULL
};
int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
{//=========================================================
// Translate a SAPI phoneme-id string into an eSpeak "[[...]]" phoneme
// string.  If pW is not NULL, the result (including its 0 terminator)
// is also copied there as wide characters.
// Returns the length of the generated phoneme string, or 0 when the
// current voice's language has no translation table (only English here).
	int ph;
	int ix=2;        // write position in phbuf, after the opening "[["
	int skip=0;      // set when the following id was already consumed (a stress mark)
	int maxph = 49;  // highest phoneme id present in the table
	char *p;
	int j;
	int lang;
	char **phoneme_names;
	char phbuf[200];
	espeak_VOICE *voice;

	// identify the language of the current voice; languages[] appears to
	// hold a priority byte followed by the language name -- TODO confirm
	voice = espeak_GetCurrentVoice();
	lang = (voice->languages[1] << 8) + (voice->languages[2]);

	phoneme_names = phoneme_names_en;
	maxph = 0;     // NOTE: overwrites the initial 49; set again only for English

	if(lang == L('e','n'))
	{
		phoneme_names = phoneme_names_en;
		maxph = 49;
	}

	if(maxph == 0)
		return(0);   // unsupported language: emit nothing

	strcpy(phbuf,"[[");
	while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
	{
		if(skip)
		{
			skip = 0;
			continue;
		}
		if(ph > maxph)
			continue;

		// NOTE(review): phons[0] is not range-checked against maxph before
		// this lookup -- confirm SAPI never passes ids above 49 here
		p = phoneme_names[phons[0]]; // look at the phoneme after this one
		if(p != NULL)
		{
			if(p[0] == '\'')
			{
				phbuf[ix++] = '\''; // primary stress, put before the vowel, not after
				skip=1;
			}
			if(p[0] == ',')
			{
				phbuf[ix++] = ','; // secondary stress
				skip=1;
			}
		}

		p = phoneme_names[ph]; // look at this phoneme
		if(p != NULL)
		{
			strcpy(&phbuf[ix],p);
			ix += strlen(p);
		}
	}
	strcpy(&phbuf[ix],"]]");
	ix += 2;

	if(pW != NULL)
	{
		// copy to the wide output buffer, including the 0 terminator
		for(j=0; j<=ix; j++)
		{
			pW[j] = phbuf[j];
		}
	}
	return(strlen(phbuf));
}
  418. int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
  419. {//============================================================================================================================
  420. int action;
  421. int control;
  422. wchar_t *pW;
  423. const SPVSTATE *state;
  424. unsigned int ix;
  425. unsigned int len;
  426. unsigned int total=0;
  427. char cmdbuf[50];
  428. wchar_t markbuf[32];
  429. int speed;
  430. int volume;
  431. int pitch;
  432. int range;
  433. int emphasis;
  434. int sayas;
  435. unsigned int text_offset = 0;
  436. frag_count = 0;
  437. frag_ix = 0;
  438. pW = pW_start;
  439. // check that the current voice is correct for this request
  440. if(strcmp(voice_name, g_voice_name) != 0)
  441. {
  442. strcpy(g_voice_name, voice_name);
  443. espeak_SetVoiceByName(g_voice_name);
  444. }
  445. while(pTextFragList != NULL)
  446. {
  447. action = pTextFragList->State.eAction;
  448. control = pOutputSite->GetActions();
  449. len = pTextFragList->ulTextLen;
  450. if(control & SPVES_ABORT)
  451. break;
  452. CheckActions(pOutputSite);
  453. sayas = 0;
  454. state = &pTextFragList->State;
  455. switch(action)
  456. {
  457. case SPVA_SpellOut:
  458. sayas = 0x12; // SAYAS_CHARS; // drop through to SPVA_Speak
  459. case SPVA_Speak:
  460. text_offset = pTextFragList->ulTextSrcOffset;
  461. audio_offset = audio_latest;
  462. #ifdef deleted
  463. // attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
  464. if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len==2) && (pTextFragList->pTextStart[1]==' '))))
  465. {
  466. // A single text fragment with one character. Speak as a character, not a word
  467. sayas = 0x11;
  468. gSayas = 0;
  469. }
  470. #endif
  471. if(frag_count >= n_frag_offsets)
  472. {
  473. if((frag_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500))) != NULL)
  474. {
  475. n_frag_offsets = frag_count+500;
  476. }
  477. }
  478. // first set the volume, rate, pitch
  479. volume = (state->Volume * master_volume)/100;
  480. speed = ConvertRate(state->RateAdj);
  481. pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
  482. range = ConvertRange(state->PitchAdj.RangeAdj);
  483. emphasis = state->EmphAdj;
  484. if(emphasis != 0)
  485. emphasis = 3;
  486. len = 0;
  487. if(volume != gVolume)
  488. {
  489. sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
  490. len += strlen(&cmdbuf[len]);
  491. }
  492. if(speed != gSpeed)
  493. {
  494. sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
  495. len += strlen(&cmdbuf[len]);
  496. }
  497. if(pitch != gPitch)
  498. {
  499. sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
  500. len += strlen(&cmdbuf[len]);
  501. }
  502. if(range != gRange)
  503. {
  504. sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
  505. len += strlen(&cmdbuf[len]);
  506. }
  507. if(emphasis != gEmphasis)
  508. {
  509. sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
  510. len += strlen(&cmdbuf[len]);
  511. }
  512. if(sayas != gSayas)
  513. {
  514. sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
  515. len += strlen(&cmdbuf[len]);
  516. }
  517. gVolume = volume;
  518. gSpeed = speed;
  519. gPitch = pitch;
  520. gRange = range;
  521. gEmphasis = emphasis;
  522. gSayas = sayas;
  523. total += (len + pTextFragList->ulTextLen);
  524. if(pTextFragList->ulTextLen > 0)
  525. {
  526. total++;
  527. }
  528. if(pW != NULL)
  529. {
  530. for(ix=0; ix<len; ix++)
  531. {
  532. *pW++ = cmdbuf[ix];
  533. }
  534. frag_offsets[frag_count].textix = text_offset;
  535. frag_offsets[frag_count].bufix = pW - pW_start;
  536. frag_offsets[frag_count].cmdlen = len;
  537. #ifdef TEST_INPUT
  538. {
  539. FILE *f;
  540. unsigned int c;
  541. int n;
  542. char buf[10];
  543. f = fopen("C:\\espeak_text_log.txt","a");
  544. if(f != NULL)
  545. {
  546. fprintf(f,"----------\n");
  547. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  548. {
  549. c = pTextFragList->pTextStart[ix];
  550. n = utf8_out(c,buf);
  551. buf[n] = 0;
  552. fprintf(f,"%s",buf);
  553. }
  554. fprintf(f,"\n");
  555. fclose(f);
  556. }
  557. }
  558. #endif
  559. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  560. {
  561. *pW++ = pTextFragList->pTextStart[ix];
  562. }
  563. if(pTextFragList->ulTextLen > 0)
  564. {
  565. *pW++ = ' ';
  566. }
  567. }
  568. frag_count++;
  569. break;
  570. case SPVA_Bookmark:
  571. total += (2 + pTextFragList->ulTextLen);
  572. if(pW != NULL)
  573. {
  574. int index;
  575. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  576. {
  577. markbuf[ix] = (char )pTextFragList->pTextStart[ix];
  578. }
  579. markbuf[ix] = 0;
  580. if((index = AddNameData((const char *)markbuf,1)) >= 0)
  581. {
  582. sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
  583. len = strlen(cmdbuf);
  584. for(ix=0; ix<len; ix++)
  585. {
  586. *pW++ = cmdbuf[ix];
  587. }
  588. }
  589. }
  590. break;
  591. case SPVA_Pronounce:
  592. total += WritePhonemes(state->pPhoneIds, pW);
  593. if(pW != NULL)
  594. {
  595. pW += total;
  596. }
  597. break;
  598. }
  599. pTextFragList = pTextFragList->pNext;
  600. }
  601. if(pW != NULL)
  602. {
  603. *pW = 0;
  604. }
  605. *n_text = frag_count;
  606. return(total);
  607. } // end of ProcessFragList
  608. /*****************************************************************************
  609. * CTTSEngObj::Speak *
  610. *-------------------*
  611. * Description:
  612. * This is the primary method that SAPI calls to render text.
  613. *-----------------------------------------------------------------------------
  614. * Input Parameters
  615. *
  616. * pUser
  617. * Pointer to the current user profile object. This object contains
  618. * information like what languages are being used and this object
  619. * also gives access to resources like the SAPI master lexicon object.
  620. *
  621. * dwSpeakFlags
  622. * This is a set of flags used to control the behavior of the
  623. * SAPI voice object and the associated engine.
  624. *
  625. * VoiceFmtIndex
  626. * Zero based index specifying the output format that should
  627. * be used during rendering.
  628. *
  629. * pTextFragList
  630. * A linked list of text fragments to be rendered. There is
  631. * one fragment per XML state change. If the input text does
  632. * not contain any XML markup, there will only be a single fragment.
  633. *
  634. * pOutputSite
  635. * The interface back to SAPI where all output audio samples and events are written.
  636. *
  637. * Return Values
  638. * S_OK - This should be returned after successful rendering or if
  639. * rendering was interrupted because *pfContinue changed to FALSE.
  640. * E_INVALIDARG
  641. * E_OUTOFMEMORY
  642. *
  643. *****************************************************************************/
  644. STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
  645. REFGUID rguidFormatId,
  646. const WAVEFORMATEX * pWaveFormatEx,
  647. const SPVTEXTFRAG* pTextFragList,
  648. ISpTTSEngineSite* pOutputSite )
  649. {
  650. SPDBG_FUNC( "CTTSEngObj::Speak" );
  651. HRESULT hr = S_OK;
  652. unsigned int size;
  653. int xVolume;
  654. int xSpeed;
  655. int xPitch;
  656. int xRange;
  657. int xEmphasis;
  658. int xSayas;
  659. int punctuation;
  660. int n_text_frag=0;
  661. //--- Check args
  662. if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
  663. SP_IS_BAD_READ_PTR( pTextFragList ) )
  664. {
  665. hr = E_INVALIDARG;
  666. }
  667. else
  668. {
  669. InitNamedata();
  670. //--- Init some vars
  671. m_pCurrFrag = pTextFragList;
  672. m_pNextChar = m_pCurrFrag->pTextStart;
  673. m_pEndChar = m_pNextChar + m_pCurrFrag->ulTextLen;
  674. m_ullAudioOff = 0;
  675. m_OutputSite = pOutputSite;
  676. pOutputSite->GetEventInterest(&event_interest);
  677. xVolume = gVolume;
  678. xSpeed = gSpeed;
  679. xPitch = gPitch;
  680. xRange = gRange;
  681. xEmphasis = gEmphasis;
  682. xSayas = gSayas;
  683. // find the size of the text buffer needed for this Speak() request
  684. size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);
  685. gVolume = xVolume;
  686. gSpeed = xSpeed;
  687. gPitch = xPitch;
  688. gRange = xRange;
  689. gEmphasis = xEmphasis;
  690. gSayas = xSayas;
  691. punctuation = 0;
  692. if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
  693. punctuation = 1;
  694. espeak_SetParameter(espeakPUNCTUATION,punctuation,0);
  695. size = (size + 50)*sizeof(wchar_t);
  696. if(size > gBufSize)
  697. {
  698. size += 1000; // some extra so we don't need to realloc() again too often
  699. TextBuf = (wchar_t *)realloc(TextBuf,size);
  700. if(TextBuf == NULL)
  701. {
  702. gBufSize=0;
  703. return(1);
  704. }
  705. gBufSize = size;
  706. }
  707. audio_latest = 0;
  708. prev_phoneme = 0;
  709. prev_phoneme_time = 0;
  710. prev_phoneme_position = 0;
  711. size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);
  712. if(size > 0)
  713. {
  714. espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
  715. }
  716. }
  717. return hr;
  718. } /* CTTSEngObj::Speak */
  719. HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
  720. {//==============================================================
  721. int control;
  722. USHORT volume;
  723. long rate;
  724. control = pOutputSite->GetActions();
  725. if(control & SPVES_VOLUME)
  726. {
  727. if(pOutputSite->GetVolume(&volume) == S_OK)
  728. {
  729. master_volume = volume;
  730. }
  731. }
  732. if(control & SPVES_RATE)
  733. {
  734. if(pOutputSite->GetRate(&rate) == S_OK)
  735. {
  736. master_rate = rate;
  737. }
  738. }
  739. return(S_OK);
  740. } // end of CTTSEngObj::CheckActions
  741. STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
  742. GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
  743. {//========================================================================
  744. SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
  745. HRESULT hr = S_OK;
  746. enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;
  747. srate = 441;
  748. if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
  749. {
  750. srate = 320;
  751. sample_rate = SPSF_16kHz16BitMono; // an mbrola voice
  752. }
  753. hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);
  754. return hr;
  755. } /* CTTSEngObj::GetVoiceFormat */
  756. int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
  757. {//===========================================================
  758. FILE *f_log3;
  759. char fname[120];
  760. f_log3 = fopen(path_log,"w");
  761. sprintf(fname,"%s/",path_install);
  762. espeak_SetVoiceByName(voice);
  763. espeak_CompileDictionary(fname,f_log3,0);
  764. fclose(f_log3);
  765. return(0);
  766. }