eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ttsengobj.cpp 23KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931
  1. /***************************************************************************
  2. * Copyright (C) 2005 to 2007 by Jonathan Duddington *
  3. * email: [email protected] *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 3 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write see: *
  17. * <http://www.gnu.org/licenses/>. *
  18. ***************************************************************************/
  19. #include "stdafx.h"
  20. #include "TtsEngObj.h"
  21. #include "src/speak_lib.h"
  22. #include "stdio.h"
  23. #define CTRL_EMBEDDED 1
  24. CTTSEngObj *m_EngObj;
  25. ISpTTSEngineSite* m_OutputSite;
  26. FILE *f_log2=NULL;
  27. ULONGLONG event_interest;
  28. extern int AddNameData(const char *name, int wide);
  29. extern void InitNamedata(void);
  30. int master_volume = 100;
  31. int master_rate = 0;
  32. int initialised = 0;
  33. int gVolume = 100;
  34. int gSpeed = -1;
  35. int gPitch = -1;
  36. int gRange = -1;
  37. int gEmphasis = 0;
  38. int gSayas = 0;
  39. char g_voice_name[80];
  40. char *path_install = NULL;
  41. unsigned long audio_offset = 0;
  42. unsigned long audio_latest = 0;
  43. int prev_phoneme = 0;
  44. int prev_phoneme_position = 0;
  45. unsigned long prev_phoneme_time = 0;
  46. unsigned int gBufSize = 0;
  47. wchar_t *TextBuf=NULL;
  48. typedef struct {
  49. unsigned int bufix;
  50. unsigned int textix;
  51. unsigned int cmdlen;
  52. } FRAG_OFFSET;
  53. int srate; // samplerate, Hz/50
  54. int n_frag_offsets = 0;
  55. int frag_ix = 0;
  56. int frag_count=0;
  57. FRAG_OFFSET *frag_offsets = NULL;
  58. //#define TEST_INPUT // printf input text received from SAPI to espeak_text_log.txt
  59. #ifdef TEST_INPUT
  60. static int utf8_out(unsigned int c, char *buf)
  61. {//====================================
  62. // write a unicode character into a buffer as utf8
  63. // returns the number of bytes written
  64. int n_bytes;
  65. int j;
  66. int shift;
  67. static char unsigned code[4] = {0,0xc0,0xe0,0xf0};
  68. if(c < 0x80)
  69. {
  70. buf[0] = c;
  71. return(1);
  72. }
  73. if(c >= 0x110000)
  74. {
  75. buf[0] = ' '; // out of range character code
  76. return(1);
  77. }
  78. if(c < 0x0800)
  79. n_bytes = 1;
  80. else
  81. if(c < 0x10000)
  82. n_bytes = 2;
  83. else
  84. n_bytes = 3;
  85. shift = 6*n_bytes;
  86. buf[0] = code[n_bytes] | (c >> shift);
  87. for(j=0; j<n_bytes; j++)
  88. {
  89. shift -= 6;
  90. buf[j+1] = 0x80 + ((c >> shift) & 0x3f);
  91. }
  92. return(n_bytes+1);
  93. } // end of utf8_out
  94. #endif
  95. int VisemeCode(unsigned int phoneme_name)
  96. {//======================================
  97. // Convert eSpeak phoneme name into a SAPI viseme code
  98. int ix;
  99. unsigned int ph;
  100. unsigned int ph_name;
  101. #define PH(c1,c2) (c2<<8)+c1 // combine two characters into an integer for phoneme name
  102. const unsigned char initial_to_viseme[128] = {
  103. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  104. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  105. 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,19, 0, 0, 0, 0, 0,
  106. 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,255,
  107. 4, 2,18,16,17, 4,18,20,12, 6,16,20,14,21,20, 3,
  108. 21,20,13,16,17, 4, 1, 5,20, 7,16, 0, 0, 0, 0, 0,
  109. 0, 1,21,16,19, 4,18,20,12, 6, 6,20,14,21,19, 8,
  110. 21,20,13,15,19, 7,18, 7,20, 7,15, 0, 0, 0, 0, 0 };
  111. const unsigned int viseme_exceptions[] = {
  112. PH('a','I'), 11,
  113. PH('a','U'), 9,
  114. PH('O','I'), 10,
  115. PH('t','S'), 16,
  116. PH('d','Z'), 16,
  117. PH('_','|'), 255,
  118. 0
  119. };
  120. ph_name = phoneme_name & 0xffff;
  121. for(ix=0; (ph = viseme_exceptions[ix]) != 0; ix+=2)
  122. {
  123. if(ph == ph_name)
  124. {
  125. return(viseme_exceptions[ix+1]);
  126. }
  127. }
  128. return(initial_to_viseme[phoneme_name & 0x7f]);
  129. }
  130. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);
  131. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
  132. {//================================================================
  133. int hr;
  134. wchar_t *tailptr;
  135. unsigned int text_offset;
  136. int length;
  137. int phoneme_duration;
  138. int this_viseme;
  139. espeak_EVENT *event;
  140. #define N_EVENTS 100
  141. int n_Events = 0;
  142. SPEVENT *Event;
  143. SPEVENT Events[N_EVENTS];
  144. if(m_OutputSite->GetActions() & SPVES_ABORT)
  145. return(1);
  146. m_EngObj->CheckActions(m_OutputSite);
  147. // return the events
  148. for(event=events; (event->type != 0) && (n_Events < N_EVENTS); event++)
  149. {
  150. audio_latest = event->audio_position + audio_offset;
  151. if((event->type == espeakEVENT_WORD) && (event->length > 0))
  152. {
  153. while(((frag_ix+1) < frag_count) &&
  154. ((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
  155. {
  156. frag_ix++;
  157. }
  158. text_offset = frag_offsets[frag_ix].textix +
  159. event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
  160. length = event->length - frag_offsets[frag_ix].cmdlen;
  161. frag_offsets[frag_ix].cmdlen = 0;
  162. if(text_offset < 0)
  163. text_offset = 0;
  164. Event = &Events[n_Events++];
  165. Event->eEventId = SPEI_WORD_BOUNDARY;
  166. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  167. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  168. Event->lParam = text_offset;
  169. Event->wParam = length;
  170. }
  171. if(event->type == espeakEVENT_MARK)
  172. {
  173. Event = &Events[n_Events++];
  174. Event->eEventId = SPEI_TTS_BOOKMARK;
  175. Event->elParamType = SPET_LPARAM_IS_STRING;
  176. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  177. Event->lParam = (long)event->id.name;
  178. Event->wParam = wcstol((wchar_t *)event->id.name,&tailptr,10);
  179. }
  180. if(event->type == espeakEVENT_PHONEME)
  181. {
  182. if(event_interest & SPEI_VISEME)
  183. {
  184. phoneme_duration = audio_latest - prev_phoneme_time;
  185. // ignore some phonemes (which translate to viseme=255)
  186. if((this_viseme = VisemeCode(event->id.number)) != 255)
  187. {
  188. Event = &Events[n_Events++];
  189. Event->eEventId = SPEI_VISEME;
  190. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  191. Event->ullAudioStreamOffset = ((prev_phoneme_position + audio_offset) * srate)/10; // ms -> bytes
  192. Event->lParam = phoneme_duration << 16 | this_viseme;
  193. Event->wParam = VisemeCode(prev_phoneme);
  194. prev_phoneme = event->id.number;
  195. prev_phoneme_time = audio_latest;
  196. prev_phoneme_position = event->audio_position;
  197. }
  198. }
  199. }
  200. #ifdef deleted
  201. if(event->type == espeakEVENT_SENTENCE)
  202. {
  203. Event = &Events[n_Events++];
  204. Event->eEventId = SPEI_SENTENCE_BOUNDARY;
  205. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  206. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  207. Event->lParam = 0;
  208. Event->wParam = 0; // TEMP
  209. }
  210. #endif
  211. }
  212. if(n_Events > 0)
  213. m_OutputSite->AddEvents(Events, n_Events );
  214. // return the sound data
  215. hr = m_OutputSite->Write(wav, numsamples*2, NULL);
  216. return(hr);
  217. }
  218. static int ConvertRate(int new_rate)
  219. {//=================================
  220. int rate;
  221. static int rate_table[21] = {
  222. 80,110,124,135,145,155,165,173,180,187,
  223. 196,
  224. 208,220,245,270,300,335,375,420,470,530 };
  225. rate = new_rate + master_rate;
  226. if(rate < -10) rate = -10;
  227. if(rate > 10) rate = 10;
  228. return(rate_table[rate+10]);
  229. } // end of ConvertRate
  230. static int ConvertPitch(int pitch)
  231. {//===============================
  232. static int pitch_table[41] =
  233. {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
  234. 54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
  235. // {0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
  236. // 53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};
  237. if(pitch < -20) pitch = -20;
  238. if(pitch > 20) pitch = 20;
  239. return(pitch_table[pitch+20]);
  240. }
  241. static int ConvertRange(int range)
  242. {//===============================
  243. static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};
  244. if(range < -10) range = -10;
  245. if(range > 10) range = 10;
  246. return(range_table[range+10]/2);
  247. }
  248. HRESULT CTTSEngObj::FinalConstruct()
  249. {//=================================
  250. SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
  251. HRESULT hr = S_OK;
  252. #ifdef LOG_DEBUG
  253. f_log2=fopen("C:\\log_espeak","a");
  254. if(f_log2) fprintf(f_log2,"\n****\n");
  255. #endif
  256. //--- Init vars
  257. m_hVoiceData = NULL;
  258. m_pVoiceData = NULL;
  259. m_pWordList = NULL;
  260. m_ulNumWords = 0;
  261. m_EngObj = this;
  262. return hr;
  263. } /* CTTSEngObj::FinalConstruct */
  264. void CTTSEngObj::FinalRelease()
  265. {//============================
  266. SPDBG_FUNC( "CTTSEngObj::FinalRelease" );
  267. delete m_pWordList;
  268. #ifdef LOG_DEBUG
  269. if(f_log2!=NULL) fclose(f_log2);
  270. #endif
  271. if( m_pVoiceData )
  272. {
  273. ::UnmapViewOfFile( (void*)m_pVoiceData );
  274. }
  275. if( m_hVoiceData )
  276. {
  277. ::CloseHandle( m_hVoiceData );
  278. }
  279. } /* CTTSEngObj::FinalRelease */
  280. //
  281. //=== ISpObjectWithToken Implementation ======================================
  282. //
  283. void WcharToChar(char *out, const wchar_t *in, int len)
  284. {//====================================================
  285. int ix;
  286. for(ix=0; ix<len; ix++)
  287. {
  288. if((out[ix] = (char)in[ix]) == 0)
  289. break;
  290. }
  291. out[len-1] = 0;
  292. }
  293. /*****************************************************************************
  294. * CTTSEngObj::SetObjectToken *
  295. *----------------------------*
  296. * Description:
  297. * Read the "VoiceName" attribute from the registry, and use it to select
  298. * an eSpeak voice file
  299. *****************************************************************************/
  300. STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
  301. {
  302. strcpy(voice_name,"default");
  303. SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
  304. HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);
  305. if( SUCCEEDED( hr ) )
  306. {
  307. CSpDynamicString voicename;
  308. CSpDynamicString path;
  309. HRESULT hr2;
  310. int len;
  311. hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
  312. if( SUCCEEDED(hr2) )
  313. {
  314. WcharToChar(voice_name,voicename,sizeof(voice_name));
  315. }
  316. hr2 = m_cpToken->GetStringValue( L"Path", &path);
  317. if( SUCCEEDED(hr2) )
  318. {
  319. len = wcslen(path)+1;
  320. path_install = (char *)malloc(len);
  321. WcharToChar(path_install,path,len);
  322. }
  323. }
  324. gVolume = 100;
  325. gSpeed = -1;
  326. gPitch = -1;
  327. gRange = -1;
  328. gEmphasis = 0;
  329. gSayas = 0;
  330. if(initialised==0)
  331. {
  332. espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,1);
  333. espeak_SetSynthCallback(SynthCallback);
  334. initialised = 1;
  335. // g_voice_name[0] = 0;
  336. }
  337. strcpy(g_voice_name, voice_name);
  338. espeak_SetVoiceByName(g_voice_name);
  339. return hr;
  340. } /* CTTSEngObj::SetObjectToken */
  341. //
  342. //=== ISpTTSEngine Implementation ============================================
  343. //
  344. #define L(c1,c2) (c1<<8)+c2 // combine two characters into an integer
  345. static char *phoneme_names_en[] = {
  346. NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
  347. "A:","a","V","0","aU","@","aI",
  348. "b","tS","d","D","E","3:","eI",
  349. "f","g","h","I","i:","dZ","k",
  350. "l","m","n","N","oU","OI","p",
  351. "r","s","S","t","T","U","u:",
  352. "v","w","j","z","Z",
  353. NULL
  354. };
  355. int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
  356. {//=========================================================
  357. int ph;
  358. int ix=2;
  359. int skip=0;
  360. int maxph = 49;
  361. char *p;
  362. int j;
  363. int lang;
  364. char **phoneme_names;
  365. char phbuf[200];
  366. espeak_VOICE *voice;
  367. voice = espeak_GetCurrentVoice();
  368. lang = (voice->languages[1] << 8) + (voice->languages[2]);
  369. phoneme_names = phoneme_names_en;
  370. maxph = 0;
  371. if(lang == L('e','n'))
  372. {
  373. phoneme_names = phoneme_names_en;
  374. maxph = 49;
  375. }
  376. if(maxph == 0)
  377. return(0);
  378. strcpy(phbuf,"[[");
  379. while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
  380. {
  381. if(skip)
  382. {
  383. skip = 0;
  384. continue;
  385. }
  386. if(ph > maxph)
  387. continue;
  388. p = phoneme_names[phons[0]]; // look at the phoneme after this one
  389. if(p != NULL)
  390. {
  391. if(p[0] == '\'')
  392. {
  393. phbuf[ix++] = '\''; // primary stress, put before the vowel, not after
  394. skip=1;
  395. }
  396. if(p[0] == ',')
  397. {
  398. phbuf[ix++] = ','; // secondary stress
  399. skip=1;
  400. }
  401. }
  402. p = phoneme_names[ph]; // look at this phoneme
  403. if(p != NULL)
  404. {
  405. strcpy(&phbuf[ix],p);
  406. ix += strlen(p);
  407. }
  408. }
  409. strcpy(&phbuf[ix],"]]");
  410. ix += 2;
  411. if(pW != NULL)
  412. {
  413. for(j=0; j<=ix; j++)
  414. {
  415. pW[j] = phbuf[j];
  416. }
  417. }
  418. return(strlen(phbuf));
  419. }
  420. int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
  421. {//============================================================================================================================
  422. int action;
  423. int control;
  424. wchar_t *pW;
  425. const SPVSTATE *state;
  426. unsigned int ix;
  427. unsigned int len;
  428. unsigned int total=0;
  429. char cmdbuf[50];
  430. wchar_t markbuf[32];
  431. int speed;
  432. int volume;
  433. int pitch;
  434. int range;
  435. int emphasis;
  436. int sayas;
  437. unsigned int text_offset = 0;
  438. frag_count = 0;
  439. frag_ix = 0;
  440. pW = pW_start;
  441. // check that the current voice is correct for this request
  442. if(strcmp(voice_name, g_voice_name) != 0)
  443. {
  444. strcpy(g_voice_name, voice_name);
  445. espeak_SetVoiceByName(g_voice_name);
  446. }
  447. while(pTextFragList != NULL)
  448. {
  449. action = pTextFragList->State.eAction;
  450. control = pOutputSite->GetActions();
  451. len = pTextFragList->ulTextLen;
  452. if(control & SPVES_ABORT)
  453. break;
  454. CheckActions(pOutputSite);
  455. sayas = 0;
  456. state = &pTextFragList->State;
  457. switch(action)
  458. {
  459. case SPVA_SpellOut:
  460. sayas = 0x12; // SAYAS_CHARS; // drop through to SPVA_Speak
  461. case SPVA_Speak:
  462. text_offset = pTextFragList->ulTextSrcOffset;
  463. audio_offset = audio_latest;
  464. #ifdef deleted
  465. // attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
  466. if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len==2) && (pTextFragList->pTextStart[1]==' '))))
  467. {
  468. // A single text fragment with one character. Speak as a character, not a word
  469. sayas = 0x11;
  470. gSayas = 0;
  471. }
  472. #endif
  473. if(frag_count >= n_frag_offsets)
  474. {
  475. if((frag_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500))) != NULL)
  476. {
  477. n_frag_offsets = frag_count+500;
  478. }
  479. }
  480. // first set the volume, rate, pitch
  481. volume = (state->Volume * master_volume)/100;
  482. speed = ConvertRate(state->RateAdj);
  483. pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
  484. range = ConvertRange(state->PitchAdj.RangeAdj);
  485. emphasis = state->EmphAdj;
  486. if(emphasis != 0)
  487. emphasis = 3;
  488. len = 0;
  489. if(volume != gVolume)
  490. {
  491. sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
  492. len += strlen(&cmdbuf[len]);
  493. }
  494. if(speed != gSpeed)
  495. {
  496. sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
  497. len += strlen(&cmdbuf[len]);
  498. }
  499. if(pitch != gPitch)
  500. {
  501. sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
  502. len += strlen(&cmdbuf[len]);
  503. }
  504. if(range != gRange)
  505. {
  506. sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
  507. len += strlen(&cmdbuf[len]);
  508. }
  509. if(emphasis != gEmphasis)
  510. {
  511. sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
  512. len += strlen(&cmdbuf[len]);
  513. }
  514. if(sayas != gSayas)
  515. {
  516. sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
  517. len += strlen(&cmdbuf[len]);
  518. }
  519. gVolume = volume;
  520. gSpeed = speed;
  521. gPitch = pitch;
  522. gRange = range;
  523. gEmphasis = emphasis;
  524. gSayas = sayas;
  525. total += (len + pTextFragList->ulTextLen);
  526. if(pTextFragList->ulTextLen > 0)
  527. {
  528. total++;
  529. }
  530. if(pW != NULL)
  531. {
  532. for(ix=0; ix<len; ix++)
  533. {
  534. *pW++ = cmdbuf[ix];
  535. }
  536. frag_offsets[frag_count].textix = text_offset;
  537. frag_offsets[frag_count].bufix = pW - pW_start;
  538. frag_offsets[frag_count].cmdlen = len;
  539. #ifdef TEST_INPUT
  540. {
  541. FILE *f;
  542. unsigned int c;
  543. int n;
  544. char buf[10];
  545. f = fopen("C:\\espeak_text_log.txt","a");
  546. if(f != NULL)
  547. {
  548. fprintf(f,"----------\n");
  549. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  550. {
  551. c = pTextFragList->pTextStart[ix];
  552. n = utf8_out(c,buf);
  553. buf[n] = 0;
  554. fprintf(f,"%s",buf);
  555. }
  556. fprintf(f,"\n");
  557. fclose(f);
  558. }
  559. }
  560. #endif
  561. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  562. {
  563. *pW++ = pTextFragList->pTextStart[ix];
  564. }
  565. if(pTextFragList->ulTextLen > 0)
  566. {
  567. *pW++ = ' ';
  568. }
  569. }
  570. frag_count++;
  571. break;
  572. case SPVA_Bookmark:
  573. total += (2 + pTextFragList->ulTextLen);
  574. if(pW != NULL)
  575. {
  576. int index;
  577. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  578. {
  579. markbuf[ix] = (char )pTextFragList->pTextStart[ix];
  580. }
  581. markbuf[ix] = 0;
  582. if((index = AddNameData((const char *)markbuf,1)) >= 0)
  583. {
  584. sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
  585. len = strlen(cmdbuf);
  586. for(ix=0; ix<len; ix++)
  587. {
  588. *pW++ = cmdbuf[ix];
  589. }
  590. }
  591. }
  592. break;
  593. case SPVA_Pronounce:
  594. total += WritePhonemes(state->pPhoneIds, pW);
  595. if(pW != NULL)
  596. {
  597. pW += total;
  598. }
  599. break;
  600. }
  601. pTextFragList = pTextFragList->pNext;
  602. }
  603. if(pW != NULL)
  604. {
  605. *pW = 0;
  606. }
  607. *n_text = frag_count;
  608. return(total);
  609. } // end of ProcessFragList
  610. /*****************************************************************************
  611. * CTTSEngObj::Speak *
  612. *-------------------*
  613. * Description:
  614. * This is the primary method that SAPI calls to render text.
  615. *-----------------------------------------------------------------------------
  616. * Input Parameters
  617. *
  618. * pUser
  619. * Pointer to the current user profile object. This object contains
  620. * information like what languages are being used and this object
  621. * also gives access to resources like the SAPI master lexicon object.
  622. *
  623. * dwSpeakFlags
  624. * This is a set of flags used to control the behavior of the
  625. * SAPI voice object and the associated engine.
  626. *
  627. * VoiceFmtIndex
  628. * Zero based index specifying the output format that should
  629. * be used during rendering.
  630. *
  631. * pTextFragList
  632. * A linked list of text fragments to be rendered. There is
  633. * one fragement per XML state change. If the input text does
  634. * not contain any XML markup, there will only be a single fragment.
  635. *
  636. * pOutputSite
  637. * The interface back to SAPI where all output audio samples and events are written.
  638. *
  639. * Return Values
  640. * S_OK - This should be returned after successful rendering or if
  641. * rendering was interrupted because *pfContinue changed to FALSE.
  642. * E_INVALIDARG
  643. * E_OUTOFMEMORY
  644. *
  645. *****************************************************************************/
  646. STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
  647. REFGUID rguidFormatId,
  648. const WAVEFORMATEX * pWaveFormatEx,
  649. const SPVTEXTFRAG* pTextFragList,
  650. ISpTTSEngineSite* pOutputSite )
  651. {
  652. SPDBG_FUNC( "CTTSEngObj::Speak" );
  653. HRESULT hr = S_OK;
  654. unsigned int size;
  655. int xVolume;
  656. int xSpeed;
  657. int xPitch;
  658. int xRange;
  659. int xEmphasis;
  660. int xSayas;
  661. int punctuation;
  662. int n_text_frag=0;
  663. //--- Check args
  664. if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
  665. SP_IS_BAD_READ_PTR( pTextFragList ) )
  666. {
  667. hr = E_INVALIDARG;
  668. }
  669. else
  670. {
  671. InitNamedata();
  672. //--- Init some vars
  673. m_pCurrFrag = pTextFragList;
  674. m_pNextChar = m_pCurrFrag->pTextStart;
  675. m_pEndChar = m_pNextChar + m_pCurrFrag->ulTextLen;
  676. m_ullAudioOff = 0;
  677. m_OutputSite = pOutputSite;
  678. pOutputSite->GetEventInterest(&event_interest);
  679. xVolume = gVolume;
  680. xSpeed = gSpeed;
  681. xPitch = gPitch;
  682. xRange = gRange;
  683. xEmphasis = gEmphasis;
  684. xSayas = gSayas;
  685. // find the size of the text buffer needed for this Speak() request
  686. size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);
  687. gVolume = xVolume;
  688. gSpeed = xSpeed;
  689. gPitch = xPitch;
  690. gRange = xRange;
  691. gEmphasis = xEmphasis;
  692. gSayas = xSayas;
  693. punctuation = 0;
  694. if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
  695. punctuation = 1;
  696. espeak_SetParameter(espeakPUNCTUATION,punctuation,0);
  697. size = (size + 50)*sizeof(wchar_t);
  698. if(size > gBufSize)
  699. {
  700. size += 1000; // some extra so we don't need to realloc() again too often
  701. TextBuf = (wchar_t *)realloc(TextBuf,size);
  702. if(TextBuf == NULL)
  703. {
  704. gBufSize=0;
  705. return(1);
  706. }
  707. gBufSize = size;
  708. }
  709. audio_latest = 0;
  710. prev_phoneme = 0;
  711. prev_phoneme_time = 0;
  712. prev_phoneme_position = 0;
  713. size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);
  714. if(size > 0)
  715. {
  716. espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
  717. }
  718. }
  719. return hr;
  720. } /* CTTSEngObj::Speak */
  721. HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
  722. {//==============================================================
  723. int control;
  724. USHORT volume;
  725. long rate;
  726. control = pOutputSite->GetActions();
  727. if(control & SPVES_VOLUME)
  728. {
  729. if(pOutputSite->GetVolume(&volume) == S_OK)
  730. {
  731. master_volume = volume;
  732. }
  733. }
  734. if(control & SPVES_RATE)
  735. {
  736. if(pOutputSite->GetRate(&rate) == S_OK)
  737. {
  738. master_rate = rate;
  739. }
  740. }
  741. return(S_OK);
  742. } // end of CTTSEngObj::CheckActions
  743. STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
  744. GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
  745. {//========================================================================
  746. SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
  747. HRESULT hr = S_OK;
  748. enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;
  749. srate = 441;
  750. if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
  751. {
  752. srate = 320;
  753. sample_rate = SPSF_16kHz16BitMono; // an mbrola voice
  754. }
  755. hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);
  756. return hr;
  757. } /* CTTSEngObj::GetVoiceFormat */
  758. int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
  759. {//===========================================================
  760. FILE *f_log3;
  761. char fname[120];
  762. f_log3 = fopen(path_log,"w");
  763. sprintf(fname,"%s/",path_install);
  764. espeak_SetVoiceByName(voice);
  765. espeak_CompileDictionary(fname,f_log3,0);
  766. fclose(f_log3);
  767. return(0);
  768. }