eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
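The file below, ttsengobj.cpp, is the SAPI 5 engine object that wraps the eSpeak C API on Windows. Stripped of the SAPI plumbing, it drives the same short call sequence as any other eSpeak client: initialize, pick a voice, register a synthesis callback, then synthesize. The stand-alone sketch that follows illustrates only that sequence; the header name, the "default" voice, and the callback body are placeholder assumptions, and error handling is omitted.

    // Minimal eSpeak client sketch (not part of ttsengobj.cpp).
    #include <string.h>
    #include "speak_lib.h"

    // Receives 16-bit samples plus word/mark events; return 0 to keep synthesizing.
    static int MySynthCallback(short *wav, int numsamples, espeak_EVENT *events)
    {
        if(wav == NULL)
            return 0;    // end of synthesis
        // ... write numsamples samples from wav to a file or audio device ...
        return 0;
    }

    int main(void)
    {
        // NULL data path: use the default espeak-data location
        espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS, 100, NULL, 0);
        espeak_SetVoiceByName("default");
        espeak_SetSynthCallback(MySynthCallback);

        const char *text = "Hello from eSpeak.";
        espeak_Synth(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                     espeakCHARS_AUTO, NULL, NULL);
        return 0;
    }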

ttsengobj.cpp (19 KB)

/***************************************************************************
 *   Copyright (C) 2005 to 2007 by Jonathan Duddington                     *
 *   email: [email protected]                                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 3 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, see <http://www.gnu.org/licenses/>.  *
 ***************************************************************************/
#include "stdafx.h"
#include "TtsEngObj.h"
#include "src/speak_lib.h"
#include "stdio.h"

#define CTRL_EMBEDDED  1

CTTSEngObj *m_EngObj;
ISpTTSEngineSite* m_OutputSite;
FILE *f_log2 = NULL;

extern int AddNameData(const char *name, int wide);
extern void InitNamedata(void);

int master_volume = 100;
int master_rate = 0;
int gVolume = 100;
int gSpeed = -1;
int gPitch = -1;
int gRange = -1;
int gEmphasis = 0;
int gSayas = 0;
char *path_install = NULL;

unsigned long audio_offset = 0;
unsigned long audio_latest = 0;

unsigned int gBufSize = 0;
wchar_t *TextBuf = NULL;

typedef struct {
    unsigned int bufix;
    unsigned int textix;
    unsigned int cmdlen;
} FRAG_OFFSET;

int srate;   // samplerate, Hz/50
int n_frag_offsets = 0;
int frag_ix = 0;
int frag_count = 0;
FRAG_OFFSET *frag_offsets = NULL;
int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);

int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
{//================================================================
    int hr;
    wchar_t *tailptr;
    int text_offset;   // signed, so the clamp to zero below can take effect
    int length;
    espeak_EVENT *event;

#define N_EVENTS 100
    int n_Events = 0;
    SPEVENT *Event;
    SPEVENT Events[N_EVENTS];

    if(m_OutputSite->GetActions() & SPVES_ABORT)
        return(1);

    m_EngObj->CheckActions(m_OutputSite);

    // return the events
    for(event=events; event->type != 0; event++)
    {
        audio_latest = event->audio_position + audio_offset;

        if((event->type == espeakEVENT_WORD) && (event->length > 0))
        {
            while(((frag_ix+1) < frag_count) &&
                ((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
            {
                frag_ix++;
            }
            text_offset = frag_offsets[frag_ix].textix +
                event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
            length = event->length - frag_offsets[frag_ix].cmdlen;
            frag_offsets[frag_ix].cmdlen = 0;

            if(text_offset < 0)
                text_offset = 0;

            Event = &Events[n_Events++];
            Event->eEventId = SPEI_WORD_BOUNDARY;
            Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
            Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;   // ms -> bytes
            Event->lParam = text_offset;
            Event->wParam = length;
        }
        if(event->type == espeakEVENT_MARK)
        {
            Event = &Events[n_Events++];
            Event->eEventId = SPEI_TTS_BOOKMARK;
            Event->elParamType = SPET_LPARAM_IS_STRING;
            Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;   // ms -> bytes
            Event->lParam = (long)event->id.name;
            Event->wParam = wcstol((wchar_t *)event->id.name,&tailptr,10);
        }

#ifdef deleted
        if(event->type == espeakEVENT_SENTENCE)
        {
            Event = &Events[n_Events++];
            Event->eEventId = SPEI_SENTENCE_BOUNDARY;
            Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
            Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10;   // ms -> bytes
            Event->lParam = 0;
            Event->wParam = 0;   // TEMP
        }
#endif
    }
    if(n_Events > 0)
        m_OutputSite->AddEvents(Events, n_Events);

    // return the sound data
    hr = m_OutputSite->Write(wav, numsamples*2, NULL);
    return(hr);
}
static int ConvertRate(int new_rate)
{//=================================
    int rate;
    static int rate_table[21] = {80,100,115,124,133,142,151,159,168,174,180,
            187,196,208,220,240,270,300,335,369,390 };

    rate = new_rate + master_rate;
    if(rate < -10) rate = -10;
    if(rate > 10) rate = 10;
    return(rate_table[rate+10]);
}   // end of ConvertRate


static int ConvertPitch(int pitch)
{//===============================
    static int pitch_table[41] =
        {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
        54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
//      {0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
//      53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};

    if(pitch < -20) pitch = -20;
    if(pitch > 20) pitch = 20;
    return(pitch_table[pitch+20]);
}


static int ConvertRange(int range)
{//===============================
    static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};

    if(range < -10) range = -10;
    if(range > 10) range = 10;
    return(range_table[range+10]/2);
}
HRESULT CTTSEngObj::FinalConstruct()
{//=================================
    SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
    HRESULT hr = S_OK;

#ifdef LOG_DEBUG
    f_log2 = fopen("C:\\log_espeak","a");
    if(f_log2) fprintf(f_log2,"\n****\n");
#endif

    //--- Init vars
    m_hVoiceData = NULL;
    m_pVoiceData = NULL;
    m_pWordList  = NULL;
    m_ulNumWords = 0;

    m_EngObj = this;
    return hr;
} /* CTTSEngObj::FinalConstruct */


void CTTSEngObj::FinalRelease()
{//============================
    SPDBG_FUNC( "CTTSEngObj::FinalRelease" );

    delete m_pWordList;

#ifdef LOG_DEBUG
    if(f_log2 != NULL) fclose(f_log2);
#endif

    if( m_pVoiceData )
    {
        ::UnmapViewOfFile( (void*)m_pVoiceData );
    }
    if( m_hVoiceData )
    {
        ::CloseHandle( m_hVoiceData );
    }
} /* CTTSEngObj::FinalRelease */
//
//=== ISpObjectWithToken Implementation ======================================
//

void WcharToChar(char *out, const wchar_t *in, int len)
{//====================================================
    int ix;

    for(ix=0; ix<len; ix++)
    {
        if((out[ix] = (char)in[ix]) == 0)
            break;
    }
    out[len-1] = 0;
}
/*****************************************************************************
* CTTSEngObj::SetObjectToken *
*----------------------------*
*   Description:
*       Read the "VoiceName" attribute from the registry, and use it to select
*       an eSpeak voice file.
*****************************************************************************/
STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
{
    char voice[80];
    strcpy(voice,"default");

    SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
    HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);

    if( SUCCEEDED( hr ) )
    {
        CSpDynamicString voicename;
        CSpDynamicString path;
        HRESULT hr2;
        int len;

        hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
        if( SUCCEEDED(hr2) )
        {
            WcharToChar(voice,voicename,sizeof(voice));
        }

        hr2 = m_cpToken->GetStringValue( L"Path", &path);
        if( SUCCEEDED(hr2) )
        {
            len = wcslen(path)+1;
            path_install = (char *)malloc(len);
            WcharToChar(path_install,path,len);
        }
    }
    gVolume = 100;
    gSpeed = -1;
    gPitch = -1;
    gRange = -1;
    gEmphasis = 0;
    gSayas = 0;

    espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,0);
    espeak_SetVoiceByName(voice);
    espeak_SetSynthCallback(SynthCallback);

    return hr;
} /* CTTSEngObj::SetObjectToken */
//
//=== ISpTTSEngine Implementation ============================================
//

#define L(c1,c2)  (c1<<8)+c2   // combine two characters into an integer

static char *phoneme_names_en[] = {
    NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
    "A:","a","V","0","aU","@","aI",
    "b","tS","d","D","E","3:","eI",
    "f","g","h","I","i:","dZ","k",
    "l","m","n","N","oU","OI","p",
    "r","s","S","t","T","U","u:",
    "v","w","j","z","Z",
    NULL
};

int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
{//=========================================================
    int ph;
    int ix=2;
    int skip=0;
    int maxph = 49;
    char *p;
    int j;
    int lang;
    char **phoneme_names;
    char phbuf[200];
    espeak_VOICE *voice;

    voice = espeak_GetCurrentVoice();
    lang = (voice->languages[1] << 8) + (voice->languages[2]);

    phoneme_names = phoneme_names_en;
    maxph = 0;

    if(lang == L('e','n'))
    {
        phoneme_names = phoneme_names_en;
        maxph = 49;
    }
    if(maxph == 0)
        return(0);

    strcpy(phbuf,"[[");

    while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
    {
        if(skip)
        {
            skip = 0;
            continue;
        }
        if(ph > maxph)
            continue;

        p = phoneme_names[phons[0]];   // look at the phoneme after this one
        if(p != NULL)
        {
            if(p[0] == '\'')
            {
                phbuf[ix++] = '\'';   // primary stress, put before the vowel, not after
                skip = 1;
            }
            if(p[0] == ',')
            {
                phbuf[ix++] = ',';    // secondary stress
                skip = 1;
            }
        }

        p = phoneme_names[ph];   // look at this phoneme
        if(p != NULL)
        {
            strcpy(&phbuf[ix],p);
            ix += strlen(p);
        }
    }
    strcpy(&phbuf[ix],"]]");
    ix += 2;

    if(pW != NULL)
    {
        for(j=0; j<=ix; j++)
        {
            pW[j] = phbuf[j];
        }
    }
    return(strlen(phbuf));
}
int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
{//============================================================================================================================
    int action;
    int control;
    wchar_t *pW;
    const SPVSTATE *state;
    unsigned int ix;
    unsigned int len;
    unsigned int total = 0;
    char cmdbuf[50];
    wchar_t markbuf[32];

    int speed;
    int volume;
    int pitch;
    int range;
    int emphasis;
    int sayas;

    unsigned int text_offset = 0;

    frag_count = 0;
    frag_ix = 0;
    pW = pW_start;

    while(pTextFragList != NULL)
    {
        action = pTextFragList->State.eAction;
        control = pOutputSite->GetActions();
        len = pTextFragList->ulTextLen;

        if(control & SPVES_ABORT)
            break;

        CheckActions(pOutputSite);
        sayas = 0;
        state = &pTextFragList->State;

        switch(action)
        {
        case SPVA_SpellOut:
            sayas = 0x12;   // SAYAS_CHARS;  // drop through to SPVA_Speak
        case SPVA_Speak:
            text_offset = pTextFragList->ulTextSrcOffset;
            audio_offset = audio_latest;

#ifdef deleted
            // attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
            if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len == 2) && (pTextFragList->pTextStart[1] == ' '))))
            {
                // A single text fragment with one character.  Speak as a character, not a word
                sayas = 0x11;
                gSayas = 0;
            }
#endif

            if(frag_count >= n_frag_offsets)
            {
                if((frag_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500))) != NULL)
                {
                    n_frag_offsets = frag_count+500;
                }
            }

            // first set the volume, rate, pitch
            volume = (state->Volume * master_volume)/100;
            speed = ConvertRate(state->RateAdj);
            pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
            range = ConvertRange(state->PitchAdj.RangeAdj);
            emphasis = state->EmphAdj;
            if(emphasis != 0)
                emphasis = 3;

            len = 0;
            if(volume != gVolume)
            {
                sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
                len += strlen(&cmdbuf[len]);
            }
            if(speed != gSpeed)
            {
                sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
                len += strlen(&cmdbuf[len]);
            }
            if(pitch != gPitch)
            {
                sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
                len += strlen(&cmdbuf[len]);
            }
            if(range != gRange)
            {
                sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
                len += strlen(&cmdbuf[len]);
            }
            if(emphasis != gEmphasis)
            {
                sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
                len += strlen(&cmdbuf[len]);
            }
            if(sayas != gSayas)
            {
                sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
                len += strlen(&cmdbuf[len]);
            }
            gVolume = volume;
            gSpeed = speed;
            gPitch = pitch;
            gRange = range;
            gEmphasis = emphasis;
            gSayas = sayas;

            total += (len + pTextFragList->ulTextLen);
            if(pTextFragList->ulTextLen > 0)
            {
                total++;
            }

            if(pW != NULL)
            {
                for(ix=0; ix<len; ix++)
                {
                    *pW++ = cmdbuf[ix];
                }

                frag_offsets[frag_count].textix = text_offset;
                frag_offsets[frag_count].bufix = pW - pW_start;
                frag_offsets[frag_count].cmdlen = len;

                for(ix=0; ix<pTextFragList->ulTextLen; ix++)
                {
                    *pW++ = pTextFragList->pTextStart[ix];
                }
                if(pTextFragList->ulTextLen > 0)
                {
                    *pW++ = ' ';
                }
            }
            frag_count++;
            break;

        case SPVA_Bookmark:
            total += (2 + pTextFragList->ulTextLen);

            if(pW != NULL)
            {
                int index;

                for(ix=0; ix<pTextFragList->ulTextLen; ix++)
                {
                    markbuf[ix] = (char)pTextFragList->pTextStart[ix];
                }
                markbuf[ix] = 0;

                if((index = AddNameData((const char *)markbuf,1)) >= 0)
                {
                    sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
                    len = strlen(cmdbuf);
                    for(ix=0; ix<len; ix++)
                    {
                        *pW++ = cmdbuf[ix];
                    }
                }
            }
            break;

        case SPVA_Pronounce:
            total += WritePhonemes(state->pPhoneIds, pW);
            if(pW != NULL)
            {
                pW += total;
            }
            break;
        }

        pTextFragList = pTextFragList->pNext;
    }

    if(pW != NULL)
    {
        *pW = 0;
    }
    *n_text = frag_count;
    return(total);
}   // end of ProcessFragList
/*****************************************************************************
* CTTSEngObj::Speak *
*-------------------*
*   Description:
*       This is the primary method that SAPI calls to render text.
*-----------------------------------------------------------------------------
*   Input Parameters
*
*   pUser
*       Pointer to the current user profile object. This object contains
*       information like what languages are being used and this object
*       also gives access to resources like the SAPI master lexicon object.
*
*   dwSpeakFlags
*       This is a set of flags used to control the behavior of the
*       SAPI voice object and the associated engine.
*
*   VoiceFmtIndex
*       Zero based index specifying the output format that should
*       be used during rendering.
*
*   pTextFragList
*       A linked list of text fragments to be rendered. There is
*       one fragment per XML state change. If the input text does
*       not contain any XML markup, there will only be a single fragment.
*
*   pOutputSite
*       The interface back to SAPI where all output audio samples and events are written.
*
*   Return Values
*       S_OK - This should be returned after successful rendering or if
*              rendering was interrupted because *pfContinue changed to FALSE.
*       E_INVALIDARG
*       E_OUTOFMEMORY
*
*****************************************************************************/
STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
                                REFGUID rguidFormatId,
                                const WAVEFORMATEX * pWaveFormatEx,
                                const SPVTEXTFRAG* pTextFragList,
                                ISpTTSEngineSite* pOutputSite )
{
    SPDBG_FUNC( "CTTSEngObj::Speak" );
    HRESULT hr = S_OK;
    unsigned int size;

    int xVolume;
    int xSpeed;
    int xPitch;
    int xRange;
    int xEmphasis;
    int xSayas;
    int punctuation;
    int n_text_frag = 0;

    //--- Check args
    if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
        SP_IS_BAD_READ_PTR( pTextFragList ) )
    {
        hr = E_INVALIDARG;
    }
    else
    {
        InitNamedata();

        //--- Init some vars
        m_pCurrFrag   = pTextFragList;
        m_pNextChar   = m_pCurrFrag->pTextStart;
        m_pEndChar    = m_pNextChar + m_pCurrFrag->ulTextLen;
        m_ullAudioOff = 0;

        m_OutputSite = pOutputSite;

        xVolume = gVolume;
        xSpeed = gSpeed;
        xPitch = gPitch;
        xRange = gRange;
        xEmphasis = gEmphasis;
        xSayas = gSayas;

        // find the size of the text buffer needed for this Speak() request
        size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);

        gVolume = xVolume;
        gSpeed = xSpeed;
        gPitch = xPitch;
        gRange = xRange;
        gEmphasis = xEmphasis;
        gSayas = xSayas;

        punctuation = 0;
        if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
            punctuation = 1;

        espeak_SetParameter(espeakPUNCTUATION,punctuation,0);

        size = (size + 50)*sizeof(wchar_t);
        if(size > gBufSize)
        {
            size += 1000;   // some extra so we don't need to realloc() again too often
            TextBuf = (wchar_t *)realloc(TextBuf,size);
            if(TextBuf == NULL)
            {
                gBufSize = 0;
                return(E_OUTOFMEMORY);   // allocation failed
            }
            gBufSize = size;
        }

        audio_latest = 0;
        size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);

        if(size > 0)
        {
            espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
        }
    }
    return hr;
} /* CTTSEngObj::Speak */
HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
{//==============================================================
    int control;
    USHORT volume;
    long rate;

    control = pOutputSite->GetActions();

    if(control & SPVES_VOLUME)
    {
        if(pOutputSite->GetVolume(&volume) == S_OK)
        {
            master_volume = volume;
        }
    }
    if(control & SPVES_RATE)
    {
        if(pOutputSite->GetRate(&rate) == S_OK)
        {
            master_rate = rate;
        }
    }
    return(S_OK);
}   // end of CTTSEngObj::CheckActions
STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
                                          GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
{//========================================================================
    SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
    HRESULT hr = S_OK;
    enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;

    srate = 441;
    if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
    {
        srate = 320;
        sample_rate = SPSF_16kHz16BitMono;   // an mbrola voice
    }
    hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);
    return hr;
} /* CTTSEngObj::GetVoiceFormat */
int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
{//=======================================================================
    FILE *f_log3;
    char fname[120];

    f_log3 = fopen(path_log,"w");
    sprintf(fname,"%s/",path_install);

    espeak_SetVoiceByName(voice);
    espeak_CompileDictionary(fname,f_log3);
    fclose(f_log3);
    return(0);
}
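
Once the engine DLL is registered and an eSpeak voice token points at it, applications do not call CTTSEngObj directly: they use the ordinary SAPI 5 ISpVoice interface, and SAPI dispatches ISpVoice::Speak to CTTSEngObj::Speak above. The sketch below is standard SAPI client usage, not part of this file; it speaks with whichever voice token is currently selected (the eSpeak voice only if it has been chosen via ISpVoice::SetVoice or the Speech control panel).

    // Standard SAPI 5 client; SAPI forwards the call to the selected engine's Speak().
    #include <sapi.h>

    int main(void)
    {
        if(FAILED(::CoInitialize(NULL)))
            return 1;

        ISpVoice *pVoice = NULL;
        HRESULT hr = ::CoCreateInstance(CLSID_SpVoice, NULL, CLSCTX_ALL,
                                        IID_ISpVoice, (void **)&pVoice);
        if(SUCCEEDED(hr))
        {
            pVoice->Speak(L"Hello from the eSpeak SAPI engine.", SPF_DEFAULT, NULL);
            pVoice->Release();
        }
        ::CoUninitialize();
        return 0;
    }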