eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ttsengobj.cpp 21KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848
  1. /***************************************************************************
  2. * Copyright (C) 2005 to 2007 by Jonathan Duddington *
  3. * email: [email protected] *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 3 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write see: *
  17. * <http://www.gnu.org/licenses/>. *
  18. ***************************************************************************/
  19. #include "stdafx.h"
  20. #include "TtsEngObj.h"
  21. #include "src/speak_lib.h"
  22. #include "stdio.h"
#define CTRL_EMBEDDED 1    // control character which introduces an embedded eSpeak command

// File-scope state shared between the engine object and the free-function
// eSpeak synthesis callback (SynthCallback).
CTTSEngObj *m_EngObj;            // the engine instance (set in FinalConstruct)
ISpTTSEngineSite* m_OutputSite;  // SAPI output site for the current Speak() call
FILE *f_log2=NULL;               // debug log file (LOG_DEBUG builds only)
ULONGLONG event_interest;        // event types the output site asked for

extern int AddNameData(const char *name, int wide);
extern void InitNamedata(void);

int master_volume = 100;         // volume from ISpTTSEngineSite::GetVolume
int master_rate = 0;             // rate from ISpTTSEngineSite::GetRate

// Prosody values most recently sent to eSpeak as embedded commands; compared
// against the new fragment state so unchanged values are not re-sent.
// (-1 = not yet set.)
int gVolume = 100;
int gSpeed = -1;
int gPitch = -1;
int gRange = -1;
int gEmphasis = 0;
int gSayas = 0;

char *path_install = NULL;       // installation path of the eSpeak data files

unsigned long audio_offset = 0;  // ms audio offset of the current fragment
unsigned long audio_latest = 0;  // ms position of the most recent event

// Timing state for viseme events (durations span successive PHONEME events).
int prev_phoneme = 0;
int prev_phoneme_position = 0;
unsigned long prev_phoneme_time = 0;

unsigned int gBufSize = 0;       // allocated size (bytes) of TextBuf
wchar_t *TextBuf=NULL;           // text buffer passed to espeak_Synth()

// Maps a position in TextBuf back to the source-text offset of the fragment
// it came from, so word-boundary events can report source-text offsets.
typedef struct {
	unsigned int bufix;   // start of the fragment's text within TextBuf
	unsigned int textix;  // the fragment's ulTextSrcOffset
	unsigned int cmdlen;  // length of the embedded commands preceding the text
} FRAG_OFFSET;

int srate; // samplerate, Hz/50
int n_frag_offsets = 0;   // allocated entries in frag_offsets
int frag_ix = 0;          // current entry while translating events
int frag_count=0;         // used entries
FRAG_OFFSET *frag_offsets = NULL;
  56. int VisemeCode(unsigned int phoneme_name)
  57. {//======================================
  58. // Convert eSpeak phoneme name into a SAPI viseme code
  59. int ix;
  60. unsigned int ph;
  61. unsigned int ph_name;
  62. #define PH(c1,c2) (c2<<8)+c1 // combine two characters into an integer for phoneme name
  63. const unsigned char initial_to_viseme[128] = {
  64. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  65. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  66. 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,19, 0, 0, 0, 0, 0,
  67. 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,255,
  68. 4, 2,18,16,17, 4,18,20,12, 6,16,20,14,21,20, 3,
  69. 21,20,13,16,17, 4, 1, 5,20, 7,16, 0, 0, 0, 0, 0,
  70. 0, 1,21,16,19, 4,18,20,12, 6, 6,20,14,21,19, 8,
  71. 21,20,13,15,19, 7,18, 7,20, 7,15, 0, 0, 0, 0, 0 };
  72. const unsigned int viseme_exceptions[] = {
  73. PH('a','I'), 11,
  74. PH('a','U'), 9,
  75. PH('O','I'), 10,
  76. PH('t','S'), 16,
  77. PH('d','Z'), 16,
  78. PH('_','|'), 255,
  79. 0
  80. };
  81. ph_name = phoneme_name & 0xffff;
  82. for(ix=0; (ph = viseme_exceptions[ix]) != 0; ix+=2)
  83. {
  84. if(ph == ph_name)
  85. {
  86. return(viseme_exceptions[ix+1]);
  87. }
  88. }
  89. return(initial_to_viseme[phoneme_name & 0x7f]);
  90. }
  91. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);
  92. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
  93. {//================================================================
  94. int hr;
  95. wchar_t *tailptr;
  96. unsigned int text_offset;
  97. int length;
  98. int phoneme_duration;
  99. int this_viseme;
  100. espeak_EVENT *event;
  101. #define N_EVENTS 100
  102. int n_Events = 0;
  103. SPEVENT *Event;
  104. SPEVENT Events[N_EVENTS];
  105. if(m_OutputSite->GetActions() & SPVES_ABORT)
  106. return(1);
  107. m_EngObj->CheckActions(m_OutputSite);
  108. // return the events
  109. for(event=events; (event->type != 0) && (n_Events < N_EVENTS); event++)
  110. {
  111. audio_latest = event->audio_position + audio_offset;
  112. if((event->type == espeakEVENT_WORD) && (event->length > 0))
  113. {
  114. while(((frag_ix+1) < frag_count) &&
  115. ((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
  116. {
  117. frag_ix++;
  118. }
  119. text_offset = frag_offsets[frag_ix].textix +
  120. event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
  121. length = event->length - frag_offsets[frag_ix].cmdlen;
  122. frag_offsets[frag_ix].cmdlen = 0;
  123. if(text_offset < 0)
  124. text_offset = 0;
  125. Event = &Events[n_Events++];
  126. Event->eEventId = SPEI_WORD_BOUNDARY;
  127. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  128. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  129. Event->lParam = text_offset;
  130. Event->wParam = length;
  131. }
  132. if(event->type == espeakEVENT_MARK)
  133. {
  134. Event = &Events[n_Events++];
  135. Event->eEventId = SPEI_TTS_BOOKMARK;
  136. Event->elParamType = SPET_LPARAM_IS_STRING;
  137. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  138. Event->lParam = (long)event->id.name;
  139. Event->wParam = wcstol((wchar_t *)event->id.name,&tailptr,10);
  140. }
  141. if(event->type == espeakEVENT_PHONEME)
  142. {
  143. if(event_interest & SPEI_VISEME)
  144. {
  145. phoneme_duration = audio_latest - prev_phoneme_time;
  146. // ignore some phonemes (which translate to viseme=255)
  147. if((this_viseme = VisemeCode(event->id.number)) != 255)
  148. {
  149. Event = &Events[n_Events++];
  150. Event->eEventId = SPEI_VISEME;
  151. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  152. Event->ullAudioStreamOffset = ((prev_phoneme_position + audio_offset) * srate)/10; // ms -> bytes
  153. Event->lParam = phoneme_duration << 16 | this_viseme;
  154. Event->wParam = VisemeCode(prev_phoneme);
  155. prev_phoneme = event->id.number;
  156. prev_phoneme_time = audio_latest;
  157. prev_phoneme_position = event->audio_position;
  158. }
  159. }
  160. }
  161. #ifdef deleted
  162. if(event->type == espeakEVENT_SENTENCE)
  163. {
  164. Event = &Events[n_Events++];
  165. Event->eEventId = SPEI_SENTENCE_BOUNDARY;
  166. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  167. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  168. Event->lParam = 0;
  169. Event->wParam = 0; // TEMP
  170. }
  171. #endif
  172. }
  173. if(n_Events > 0)
  174. m_OutputSite->AddEvents(Events, n_Events );
  175. // return the sound data
  176. hr = m_OutputSite->Write(wav, numsamples*2, NULL);
  177. return(hr);
  178. }
  179. static int ConvertRate(int new_rate)
  180. {//=================================
  181. int rate;
  182. static int rate_table[21] = {80,100,115,124,133,142,151,159,168,174,180,
  183. 187,196,208,220,240,270,300,335,369,390 };
  184. rate = new_rate + master_rate;
  185. if(rate < -10) rate = -10;
  186. if(rate > 10) rate = 10;
  187. return(rate_table[rate+10]);
  188. } // end of ConvertRate
  189. static int ConvertPitch(int pitch)
  190. {//===============================
  191. static int pitch_table[41] =
  192. {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
  193. 54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
  194. // {0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
  195. // 53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};
  196. if(pitch < -20) pitch = -20;
  197. if(pitch > 20) pitch = 20;
  198. return(pitch_table[pitch+20]);
  199. }
  200. static int ConvertRange(int range)
  201. {//===============================
  202. static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};
  203. if(range < -10) range = -10;
  204. if(range > 10) range = 10;
  205. return(range_table[range+10]/2);
  206. }
HRESULT CTTSEngObj::FinalConstruct()
{//=================================
	// Final stage of ATL object construction: initialise the per-instance
	// members and record this instance in the file-scope m_EngObj pointer
	// used by the free-function SynthCallback.
	SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
	HRESULT hr = S_OK;

#ifdef LOG_DEBUG
	// debug builds append to a fixed log file
	f_log2=fopen("C:\\log_espeak","a");
	if(f_log2) fprintf(f_log2,"\n****\n");
#endif

	//--- Init vars
	m_hVoiceData = NULL;
	m_pVoiceData = NULL;
	m_pWordList = NULL;
	m_ulNumWords = 0;
	m_EngObj = this;
	return hr;
} /* CTTSEngObj::FinalConstruct */
void CTTSEngObj::FinalRelease()
{//============================
	// Final stage of ATL object destruction: release everything acquired in
	// FinalConstruct() and later.
	SPDBG_FUNC( "CTTSEngObj::FinalRelease" );

	delete m_pWordList;

#ifdef LOG_DEBUG
	if(f_log2!=NULL) fclose(f_log2);
#endif

	// unmap and close the voice-data file mapping, if one was opened
	if( m_pVoiceData )
	{
		::UnmapViewOfFile( (void*)m_pVoiceData );
	}
	if( m_hVoiceData )
	{
		::CloseHandle( m_hVoiceData );
	}
} /* CTTSEngObj::FinalRelease */
  239. //
  240. //=== ISpObjectWithToken Implementation ======================================
  241. //
  242. void WcharToChar(char *out, const wchar_t *in, int len)
  243. {//====================================================
  244. int ix;
  245. for(ix=0; ix<len; ix++)
  246. {
  247. if((out[ix] = (char)in[ix]) == 0)
  248. break;
  249. }
  250. out[len-1] = 0;
  251. }
/*****************************************************************************
* CTTSEngObj::SetObjectToken *
*----------------------------*
* Description:
* Read the "VoiceName" attribute from the registry, and use it to select
* an eSpeak voice file
*****************************************************************************/
STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
{
	char voice[80];
	strcpy(voice,"default");   // fallback if the token has no "VoiceName"

	SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
	HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);

	if( SUCCEEDED( hr ) )
	{
		CSpDynamicString voicename;
		CSpDynamicString path;
		HRESULT hr2;
		int len;

		// voice name selected in the registry token (narrowed to 8-bit chars)
		hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
		if( SUCCEEDED(hr2) )
		{
			WcharToChar(voice,voicename,sizeof(voice));
		}

		// installation directory of the eSpeak data files
		// NOTE(review): path_install is malloc'd and never freed; it leaks if
		// SetObjectToken is called more than once on the same process.
		hr2 = m_cpToken->GetStringValue( L"Path", &path);
		if( SUCCEEDED(hr2) )
		{
			len = wcslen(path)+1;
			path_install = (char *)malloc(len);
			WcharToChar(path_install,path,len);
		}
	}

	// reset the cached prosody state so the first fragment re-sends it all
	gVolume = 100;
	gSpeed = -1;
	gPitch = -1;
	gRange = -1;
	gEmphasis = 0;
	gSayas = 0;

	espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,1);
	espeak_SetVoiceByName(voice);
	espeak_SetSynthCallback(SynthCallback);
	return hr;
} /* CTTSEngObj::SetObjectToken */
  295. //
  296. //=== ISpTTSEngine Implementation ============================================
  297. //
// Combine two characters into an integer, high byte first.  Used to compare
// against the language-code bytes taken from espeak_VOICE::languages.
#define L(c1,c2) (c1<<8)+c2

// eSpeak phoneme mnemonics indexed by the SAPI phoneme id used in
// WritePhonemes().  Index 3 is a word space; "'" and "," are the primary and
// secondary stress marks.  NULL entries are skipped (no eSpeak equivalent).
static char *phoneme_names_en[] = {
	NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
	"A:","a","V","0","aU","@","aI",
	"b","tS","d","D","E","3:","eI",
	"f","g","h","I","i:","dZ","k",
	"l","m","n","N","oU","OI","p",
	"r","s","S","t","T","U","u:",
	"v","w","j","z","Z",
	NULL
};
  309. int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
  310. {//=========================================================
  311. int ph;
  312. int ix=2;
  313. int skip=0;
  314. int maxph = 49;
  315. char *p;
  316. int j;
  317. int lang;
  318. char **phoneme_names;
  319. char phbuf[200];
  320. espeak_VOICE *voice;
  321. voice = espeak_GetCurrentVoice();
  322. lang = (voice->languages[1] << 8) + (voice->languages[2]);
  323. phoneme_names = phoneme_names_en;
  324. maxph = 0;
  325. if(lang == L('e','n'))
  326. {
  327. phoneme_names = phoneme_names_en;
  328. maxph = 49;
  329. }
  330. if(maxph == 0)
  331. return(0);
  332. strcpy(phbuf,"[[");
  333. while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
  334. {
  335. if(skip)
  336. {
  337. skip = 0;
  338. continue;
  339. }
  340. if(ph > maxph)
  341. continue;
  342. p = phoneme_names[phons[0]]; // look at the phoneme after this one
  343. if(p != NULL)
  344. {
  345. if(p[0] == '\'')
  346. {
  347. phbuf[ix++] = '\''; // primary stress, put before the vowel, not after
  348. skip=1;
  349. }
  350. if(p[0] == ',')
  351. {
  352. phbuf[ix++] = ','; // secondary stress
  353. skip=1;
  354. }
  355. }
  356. p = phoneme_names[ph]; // look at this phoneme
  357. if(p != NULL)
  358. {
  359. strcpy(&phbuf[ix],p);
  360. ix += strlen(p);
  361. }
  362. }
  363. strcpy(&phbuf[ix],"]]");
  364. ix += 2;
  365. if(pW != NULL)
  366. {
  367. for(j=0; j<=ix; j++)
  368. {
  369. pW[j] = phbuf[j];
  370. }
  371. }
  372. return(strlen(phbuf));
  373. }
  374. int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
  375. {//============================================================================================================================
  376. int action;
  377. int control;
  378. wchar_t *pW;
  379. const SPVSTATE *state;
  380. unsigned int ix;
  381. unsigned int len;
  382. unsigned int total=0;
  383. char cmdbuf[50];
  384. wchar_t markbuf[32];
  385. int speed;
  386. int volume;
  387. int pitch;
  388. int range;
  389. int emphasis;
  390. int sayas;
  391. unsigned int text_offset = 0;
  392. frag_count = 0;
  393. frag_ix = 0;
  394. pW = pW_start;
  395. while(pTextFragList != NULL)
  396. {
  397. action = pTextFragList->State.eAction;
  398. control = pOutputSite->GetActions();
  399. len = pTextFragList->ulTextLen;
  400. if(control & SPVES_ABORT)
  401. break;
  402. CheckActions(pOutputSite);
  403. sayas = 0;
  404. state = &pTextFragList->State;
  405. switch(action)
  406. {
  407. case SPVA_SpellOut:
  408. sayas = 0x12; // SAYAS_CHARS; // drop through to SPVA_Speak
  409. case SPVA_Speak:
  410. text_offset = pTextFragList->ulTextSrcOffset;
  411. audio_offset = audio_latest;
  412. #ifdef deleted
  413. // attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
  414. if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len==2) && (pTextFragList->pTextStart[1]==' '))))
  415. {
  416. // A single text fragment with one character. Speak as a character, not a word
  417. sayas = 0x11;
  418. gSayas = 0;
  419. }
  420. #endif
  421. if(frag_count >= n_frag_offsets)
  422. {
  423. if((frag_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500))) != NULL)
  424. {
  425. n_frag_offsets = frag_count+500;
  426. }
  427. }
  428. // first set the volume, rate, pitch
  429. volume = (state->Volume * master_volume)/100;
  430. speed = ConvertRate(state->RateAdj);
  431. pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
  432. range = ConvertRange(state->PitchAdj.RangeAdj);
  433. emphasis = state->EmphAdj;
  434. if(emphasis != 0)
  435. emphasis = 3;
  436. len = 0;
  437. if(volume != gVolume)
  438. {
  439. sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
  440. len += strlen(&cmdbuf[len]);
  441. }
  442. if(speed != gSpeed)
  443. {
  444. sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
  445. len += strlen(&cmdbuf[len]);
  446. }
  447. if(pitch != gPitch)
  448. {
  449. sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
  450. len += strlen(&cmdbuf[len]);
  451. }
  452. if(range != gRange)
  453. {
  454. sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
  455. len += strlen(&cmdbuf[len]);
  456. }
  457. if(emphasis != gEmphasis)
  458. {
  459. sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
  460. len += strlen(&cmdbuf[len]);
  461. }
  462. if(sayas != gSayas)
  463. {
  464. sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
  465. len += strlen(&cmdbuf[len]);
  466. }
  467. gVolume = volume;
  468. gSpeed = speed;
  469. gPitch = pitch;
  470. gRange = range;
  471. gEmphasis = emphasis;
  472. gSayas = sayas;
  473. total += (len + pTextFragList->ulTextLen);
  474. if(pTextFragList->ulTextLen > 0)
  475. {
  476. total++;
  477. }
  478. if(pW != NULL)
  479. {
  480. for(ix=0; ix<len; ix++)
  481. {
  482. *pW++ = cmdbuf[ix];
  483. }
  484. frag_offsets[frag_count].textix = text_offset;
  485. frag_offsets[frag_count].bufix = pW - pW_start;
  486. frag_offsets[frag_count].cmdlen = len;
  487. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  488. {
  489. *pW++ = pTextFragList->pTextStart[ix];
  490. }
  491. if(pTextFragList->ulTextLen > 0)
  492. {
  493. *pW++ = ' ';
  494. }
  495. }
  496. frag_count++;
  497. break;
  498. case SPVA_Bookmark:
  499. total += (2 + pTextFragList->ulTextLen);
  500. if(pW != NULL)
  501. {
  502. int index;
  503. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  504. {
  505. markbuf[ix] = (char )pTextFragList->pTextStart[ix];
  506. }
  507. markbuf[ix] = 0;
  508. if((index = AddNameData((const char *)markbuf,1)) >= 0)
  509. {
  510. sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
  511. len = strlen(cmdbuf);
  512. for(ix=0; ix<len; ix++)
  513. {
  514. *pW++ = cmdbuf[ix];
  515. }
  516. }
  517. }
  518. break;
  519. case SPVA_Pronounce:
  520. total += WritePhonemes(state->pPhoneIds, pW);
  521. if(pW != NULL)
  522. {
  523. pW += total;
  524. }
  525. break;
  526. }
  527. pTextFragList = pTextFragList->pNext;
  528. }
  529. if(pW != NULL)
  530. {
  531. *pW = 0;
  532. }
  533. *n_text = frag_count;
  534. return(total);
  535. } // end of ProcessFragList
  536. /*****************************************************************************
  537. * CTTSEngObj::Speak *
  538. *-------------------*
  539. * Description:
  540. * This is the primary method that SAPI calls to render text.
  541. *-----------------------------------------------------------------------------
  542. * Input Parameters
  543. *
  544. * pUser
  545. * Pointer to the current user profile object. This object contains
  546. * information like what languages are being used and this object
  547. * also gives access to resources like the SAPI master lexicon object.
  548. *
  549. * dwSpeakFlags
  550. * This is a set of flags used to control the behavior of the
  551. * SAPI voice object and the associated engine.
  552. *
  553. * VoiceFmtIndex
  554. * Zero based index specifying the output format that should
  555. * be used during rendering.
  556. *
  557. * pTextFragList
  558. * A linked list of text fragments to be rendered. There is
  559. * one fragement per XML state change. If the input text does
  560. * not contain any XML markup, there will only be a single fragment.
  561. *
  562. * pOutputSite
  563. * The interface back to SAPI where all output audio samples and events are written.
  564. *
  565. * Return Values
  566. * S_OK - This should be returned after successful rendering or if
  567. * rendering was interrupted because *pfContinue changed to FALSE.
  568. * E_INVALIDARG
  569. * E_OUTOFMEMORY
  570. *
  571. *****************************************************************************/
  572. STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
  573. REFGUID rguidFormatId,
  574. const WAVEFORMATEX * pWaveFormatEx,
  575. const SPVTEXTFRAG* pTextFragList,
  576. ISpTTSEngineSite* pOutputSite )
  577. {
  578. SPDBG_FUNC( "CTTSEngObj::Speak" );
  579. HRESULT hr = S_OK;
  580. unsigned int size;
  581. int xVolume;
  582. int xSpeed;
  583. int xPitch;
  584. int xRange;
  585. int xEmphasis;
  586. int xSayas;
  587. int punctuation;
  588. int n_text_frag=0;
  589. //--- Check args
  590. if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
  591. SP_IS_BAD_READ_PTR( pTextFragList ) )
  592. {
  593. hr = E_INVALIDARG;
  594. }
  595. else
  596. {
  597. InitNamedata();
  598. //--- Init some vars
  599. m_pCurrFrag = pTextFragList;
  600. m_pNextChar = m_pCurrFrag->pTextStart;
  601. m_pEndChar = m_pNextChar + m_pCurrFrag->ulTextLen;
  602. m_ullAudioOff = 0;
  603. m_OutputSite = pOutputSite;
  604. pOutputSite->GetEventInterest(&event_interest);
  605. xVolume = gVolume;
  606. xSpeed = gSpeed;
  607. xPitch = gPitch;
  608. xRange = gRange;
  609. xEmphasis = gEmphasis;
  610. xSayas = gSayas;
  611. // find the size of the text buffer needed for this Speak() request
  612. size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);
  613. gVolume = xVolume;
  614. gSpeed = xSpeed;
  615. gPitch = xPitch;
  616. gRange = xRange;
  617. gEmphasis = xEmphasis;
  618. gSayas = xSayas;
  619. punctuation = 0;
  620. if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
  621. punctuation = 1;
  622. espeak_SetParameter(espeakPUNCTUATION,punctuation,0);
  623. size = (size + 50)*sizeof(wchar_t);
  624. if(size > gBufSize)
  625. {
  626. size += 1000; // some extra so we don't need to realloc() again too often
  627. TextBuf = (wchar_t *)realloc(TextBuf,size);
  628. if(TextBuf == NULL)
  629. {
  630. gBufSize=0;
  631. return(1);
  632. }
  633. gBufSize = size;
  634. }
  635. audio_latest = 0;
  636. prev_phoneme = 0;
  637. prev_phoneme_time = 0;
  638. prev_phoneme_position = 0;
  639. size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);
  640. if(size > 0)
  641. {
  642. espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
  643. }
  644. }
  645. return hr;
  646. } /* CTTSEngObj::Speak */
  647. HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
  648. {//==============================================================
  649. int control;
  650. USHORT volume;
  651. long rate;
  652. control = pOutputSite->GetActions();
  653. if(control & SPVES_VOLUME)
  654. {
  655. if(pOutputSite->GetVolume(&volume) == S_OK)
  656. {
  657. master_volume = volume;
  658. }
  659. }
  660. if(control & SPVES_RATE)
  661. {
  662. if(pOutputSite->GetRate(&rate) == S_OK)
  663. {
  664. master_rate = rate;
  665. }
  666. }
  667. return(S_OK);
  668. } // end of CTTSEngObj::CheckActions
STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
					GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
{//========================================================================
	// Report the audio format this engine produces.  Default is 22.05 kHz
	// 16-bit mono; mbrola voices use 16 kHz.
	SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
	HRESULT hr = S_OK;
	enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;

	// srate = samplerate/50; events convert ms to bytes as (ms*srate)/10
	srate = 441;

	if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
	{
		srate = 320;
		sample_rate = SPSF_16kHz16BitMono;  // an mbrola voice
	}
	hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);
	return hr;
} /* CTTSEngObj::GetVoiceFormat */
  684. int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
  685. {//===========================================================
  686. FILE *f_log3;
  687. char fname[120];
  688. f_log3 = fopen(path_log,"w");
  689. sprintf(fname,"%s/",path_install);
  690. espeak_SetVoiceByName(voice);
  691. espeak_CompileDictionary(fname,f_log3,0);
  692. fclose(f_log3);
  693. return(0);
  694. }