eSpeak NG is an open source speech synthesizer that supports more than a hundred languages and accents.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

ttsengobj.cpp 23KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913
  1. /***************************************************************************
  2. * Copyright (C) 2005 to 2007 by Jonathan Duddington *
  3. * email: [email protected] *
  4. * *
  5. * This program is free software; you can redistribute it and/or modify *
  6. * it under the terms of the GNU General Public License as published by *
  7. * the Free Software Foundation; either version 3 of the License, or *
  8. * (at your option) any later version. *
  9. * *
  10. * This program is distributed in the hope that it will be useful, *
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  13. * GNU General Public License for more details. *
  14. * *
  15. * You should have received a copy of the GNU General Public License *
  16. * along with this program; if not, write see: *
  17. * <http://www.gnu.org/licenses/>. *
  18. ***************************************************************************/
#include "stdafx.h"
#include "TtsEngObj.h"
#include "src/speak_lib.h"
#include "stdio.h"

// Byte value used to introduce an embedded eSpeak command (volume, rate,
// pitch, ...) inside the text buffer passed to espeak_Synth().
#define CTRL_EMBEDDED 1

CTTSEngObj *m_EngObj;            // the engine object (set in FinalConstruct)
ISpTTSEngineSite* m_OutputSite;  // SAPI output site for the Speak() call in progress
FILE *f_log2=NULL;               // debug log file (only opened with LOG_DEBUG)
ULONGLONG event_interest;        // SAPI event mask obtained via GetEventInterest()

extern int AddNameData(const char *name, int wide);
extern void InitNamedata(void);

int master_volume = 100;         // volume (percent) set by SAPI real-time action
int master_rate = 0;             // rate offset set by SAPI real-time action

// most recent values sent as embedded commands, so unchanged values
// don't need to be re-sent for every fragment
int gVolume = 100;
int gSpeed = -1;
int gPitch = -1;
int gRange = -1;
int gEmphasis = 0;
int gSayas = 0;

char *path_install = NULL;       // installation path read from the voice token ("Path")

unsigned long audio_offset = 0;  // ms offset of the current fragment in the output stream
unsigned long audio_latest = 0;  // ms position of the most recent synthesis event

// state used to time and position viseme (SPEI_VISEME) events
int prev_phoneme = 0;
int prev_phoneme_position = 0;
unsigned long prev_phoneme_time = 0;

unsigned int gBufSize = 0;       // allocated size (bytes) of TextBuf
wchar_t *TextBuf=NULL;           // text buffer passed to espeak_Synth()

// Maps a position in TextBuf back to the source-text offset of the SAPI
// fragment it came from, so word events can report correct text offsets.
typedef struct {
	unsigned int bufix;          // position in TextBuf where the fragment's text starts
	unsigned int textix;         // ulTextSrcOffset of the fragment in the source text
	unsigned int cmdlen;         // length of embedded commands preceding the text
} FRAG_OFFSET;

int srate; // samplerate, Hz/50
int n_frag_offsets = 0;          // allocated entries in frag_offsets
int frag_ix = 0;                 // fragment currently being reported by SynthCallback
int frag_count=0;                // number of fragments used in this Speak() request
FRAG_OFFSET *frag_offsets = NULL;
//#define TEST_INPUT // printf input text received from SAPI to espeak_text_log.txt
#ifdef TEST_INPUT
static int utf8_out(unsigned int c, char *buf)
{//====================================
// write a unicode character into a buffer as utf8
// returns the number of bytes written
	int n_bytes;    // number of continuation bytes (total length is n_bytes+1)
	int j;
	int shift;
	static char unsigned code[4] = {0,0xc0,0xe0,0xf0};  // lead-byte prefix per sequence length

	if(c < 0x80)
	{
		buf[0] = c;   // plain ASCII: single byte, no prefix
		return(1);
	}
	if(c >= 0x110000)
	{
		buf[0] = ' '; // out of range character code
		return(1);
	}
	// choose the sequence length from the code point's magnitude
	if(c < 0x0800)
		n_bytes = 1;
	else
	if(c < 0x10000)
		n_bytes = 2;
	else
		n_bytes = 3;

	shift = 6*n_bytes;
	buf[0] = code[n_bytes] | (c >> shift);   // lead byte carries the top bits
	for(j=0; j<n_bytes; j++)
	{
		shift -= 6;
		buf[j+1] = 0x80 + ((c >> shift) & 0x3f);  // 10xxxxxx continuation bytes
	}
	return(n_bytes+1);
} // end of utf8_out
#endif
  93. int VisemeCode(unsigned int phoneme_name)
  94. {//======================================
  95. // Convert eSpeak phoneme name into a SAPI viseme code
  96. int ix;
  97. unsigned int ph;
  98. unsigned int ph_name;
  99. #define PH(c1,c2) (c2<<8)+c1 // combine two characters into an integer for phoneme name
  100. const unsigned char initial_to_viseme[128] = {
  101. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  102. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  103. 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,19, 0, 0, 0, 0, 0,
  104. 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,255,
  105. 4, 2,18,16,17, 4,18,20,12, 6,16,20,14,21,20, 3,
  106. 21,20,13,16,17, 4, 1, 5,20, 7,16, 0, 0, 0, 0, 0,
  107. 0, 1,21,16,19, 4,18,20,12, 6, 6,20,14,21,19, 8,
  108. 21,20,13,15,19, 7,18, 7,20, 7,15, 0, 0, 0, 0, 0 };
  109. const unsigned int viseme_exceptions[] = {
  110. PH('a','I'), 11,
  111. PH('a','U'), 9,
  112. PH('O','I'), 10,
  113. PH('t','S'), 16,
  114. PH('d','Z'), 16,
  115. PH('_','|'), 255,
  116. 0
  117. };
  118. ph_name = phoneme_name & 0xffff;
  119. for(ix=0; (ph = viseme_exceptions[ix]) != 0; ix+=2)
  120. {
  121. if(ph == ph_name)
  122. {
  123. return(viseme_exceptions[ix+1]);
  124. }
  125. }
  126. return(initial_to_viseme[phoneme_name & 0x7f]);
  127. }
  128. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);
  129. int SynthCallback(short *wav, int numsamples, espeak_EVENT *events)
  130. {//================================================================
  131. int hr;
  132. wchar_t *tailptr;
  133. unsigned int text_offset;
  134. int length;
  135. int phoneme_duration;
  136. int this_viseme;
  137. espeak_EVENT *event;
  138. #define N_EVENTS 100
  139. int n_Events = 0;
  140. SPEVENT *Event;
  141. SPEVENT Events[N_EVENTS];
  142. if(m_OutputSite->GetActions() & SPVES_ABORT)
  143. return(1);
  144. m_EngObj->CheckActions(m_OutputSite);
  145. // return the events
  146. for(event=events; (event->type != 0) && (n_Events < N_EVENTS); event++)
  147. {
  148. audio_latest = event->audio_position + audio_offset;
  149. if((event->type == espeakEVENT_WORD) && (event->length > 0))
  150. {
  151. while(((frag_ix+1) < frag_count) &&
  152. ((event->text_position -1 + frag_offsets[frag_ix+1].cmdlen) >= frag_offsets[frag_ix+1].bufix))
  153. {
  154. frag_ix++;
  155. }
  156. text_offset = frag_offsets[frag_ix].textix +
  157. event->text_position -1 - frag_offsets[frag_ix].bufix + frag_offsets[frag_ix].cmdlen;
  158. length = event->length - frag_offsets[frag_ix].cmdlen;
  159. frag_offsets[frag_ix].cmdlen = 0;
  160. if(text_offset < 0)
  161. text_offset = 0;
  162. Event = &Events[n_Events++];
  163. Event->eEventId = SPEI_WORD_BOUNDARY;
  164. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  165. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  166. Event->lParam = text_offset;
  167. Event->wParam = length;
  168. }
  169. if(event->type == espeakEVENT_MARK)
  170. {
  171. Event = &Events[n_Events++];
  172. Event->eEventId = SPEI_TTS_BOOKMARK;
  173. Event->elParamType = SPET_LPARAM_IS_STRING;
  174. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  175. Event->lParam = (long)event->id.name;
  176. Event->wParam = wcstol((wchar_t *)event->id.name,&tailptr,10);
  177. }
  178. if(event->type == espeakEVENT_PHONEME)
  179. {
  180. if(event_interest & SPEI_VISEME)
  181. {
  182. phoneme_duration = audio_latest - prev_phoneme_time;
  183. // ignore some phonemes (which translate to viseme=255)
  184. if((this_viseme = VisemeCode(event->id.number)) != 255)
  185. {
  186. Event = &Events[n_Events++];
  187. Event->eEventId = SPEI_VISEME;
  188. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  189. Event->ullAudioStreamOffset = ((prev_phoneme_position + audio_offset) * srate)/10; // ms -> bytes
  190. Event->lParam = phoneme_duration << 16 | this_viseme;
  191. Event->wParam = VisemeCode(prev_phoneme);
  192. prev_phoneme = event->id.number;
  193. prev_phoneme_time = audio_latest;
  194. prev_phoneme_position = event->audio_position;
  195. }
  196. }
  197. }
  198. #ifdef deleted
  199. if(event->type == espeakEVENT_SENTENCE)
  200. {
  201. Event = &Events[n_Events++];
  202. Event->eEventId = SPEI_SENTENCE_BOUNDARY;
  203. Event->elParamType = SPET_LPARAM_IS_UNDEFINED;
  204. Event->ullAudioStreamOffset = ((event->audio_position + audio_offset) * srate)/10; // ms -> bytes
  205. Event->lParam = 0;
  206. Event->wParam = 0; // TEMP
  207. }
  208. #endif
  209. }
  210. if(n_Events > 0)
  211. m_OutputSite->AddEvents(Events, n_Events );
  212. // return the sound data
  213. hr = m_OutputSite->Write(wav, numsamples*2, NULL);
  214. return(hr);
  215. }
  216. static int ConvertRate(int new_rate)
  217. {//=================================
  218. int rate;
  219. static int rate_table[21] = {80,100,115,124,133,142,151,159,168,174,180,
  220. 187,196,208,220,240,270,300,335,369,390 };
  221. rate = new_rate + master_rate;
  222. if(rate < -10) rate = -10;
  223. if(rate > 10) rate = 10;
  224. return(rate_table[rate+10]);
  225. } // end of ConvertRate
  226. static int ConvertPitch(int pitch)
  227. {//===============================
  228. static int pitch_table[41] =
  229. {0, 0, 0, 0, 0, 0, 0, 0, 4, 8,12,16,20,24,28,32,36,40,44,47,50,
  230. 54,58,62,66,70,74,78,82,84,88,92,96,99,99,99,99,99,99,99,99};
  231. // {0,3,5,8,10,13,15,18,20,23,25,28,30,33,35,38,40,43,45,48,50,
  232. // 53,55,58,60,63,65,68,70,73,75,78,80,83,85,88,90,93,95,97,99};
  233. if(pitch < -20) pitch = -20;
  234. if(pitch > 20) pitch = 20;
  235. return(pitch_table[pitch+20]);
  236. }
  237. static int ConvertRange(int range)
  238. {//===============================
  239. static int range_table[21] = {16,28,39,49,58,66,74,81,88,94,100,105,110,115,120,125,130,135,140,145,150};
  240. if(range < -10) range = -10;
  241. if(range > 10) range = 10;
  242. return(range_table[range+10]/2);
  243. }
HRESULT CTTSEngObj::FinalConstruct()
{//=================================
// ATL final-construction hook: initialise members and record this
// instance in the global m_EngObj so SynthCallback() can reach it.
	SPDBG_FUNC( "CTTSEngObj::FinalConstruct" );
	HRESULT hr = S_OK;

#ifdef LOG_DEBUG
	// append a session marker to the debug log
	f_log2=fopen("C:\\log_espeak","a");
	if(f_log2) fprintf(f_log2,"\n****\n");
#endif

	//--- Init vars
	m_hVoiceData = NULL;
	m_pVoiceData = NULL;
	m_pWordList = NULL;
	m_ulNumWords = 0;

	m_EngObj = this;   // assumes a single engine instance — the callback relies on it
	return hr;
} /* CTTSEngObj::FinalConstruct */
void CTTSEngObj::FinalRelease()
{//============================
// ATL final-release hook: free the word list, close the debug log,
// and release the memory-mapped voice data.
	SPDBG_FUNC( "CTTSEngObj::FinalRelease" );

	delete m_pWordList;

#ifdef LOG_DEBUG
	if(f_log2!=NULL) fclose(f_log2);
#endif

	if( m_pVoiceData )
	{
		::UnmapViewOfFile( (void*)m_pVoiceData );
	}
	if( m_hVoiceData )
	{
		::CloseHandle( m_hVoiceData );
	}
} /* CTTSEngObj::FinalRelease */
  276. //
  277. //=== ISpObjectWithToken Implementation ======================================
  278. //
  279. void WcharToChar(char *out, const wchar_t *in, int len)
  280. {//====================================================
  281. int ix;
  282. for(ix=0; ix<len; ix++)
  283. {
  284. if((out[ix] = (char)in[ix]) == 0)
  285. break;
  286. }
  287. out[len-1] = 0;
  288. }
/*****************************************************************************
* CTTSEngObj::SetObjectToken *
*----------------------------*
* Description:
* Read the "VoiceName" attribute from the registry, and use it to select
* an eSpeak voice file.  Also reads "Path" (the eSpeak data directory),
* initialises the eSpeak library and installs the synthesis callback.
*****************************************************************************/
STDMETHODIMP CTTSEngObj::SetObjectToken(ISpObjectToken * pToken)
{
	char voice[80];
	strcpy(voice,"default");   // fallback when the token has no VoiceName

	SPDBG_FUNC( "CTTSEngObj::SetObjectToken" );
	HRESULT hr = SpGenericSetObjectToken(pToken, m_cpToken);

	if( SUCCEEDED( hr ) )
	{
		CSpDynamicString voicename;
		CSpDynamicString path;
		HRESULT hr2;
		int len;

		hr2 = m_cpToken->GetStringValue( L"VoiceName", &voicename);
		if( SUCCEEDED(hr2) )
		{
			WcharToChar(voice,voicename,sizeof(voice));
		}

		hr2 = m_cpToken->GetStringValue( L"Path", &path);
		if( SUCCEEDED(hr2) )
		{
			len = wcslen(path)+1;
			// NOTE(review): any previous path_install is leaked if SAPI calls
			// SetObjectToken more than once on this object — confirm
			path_install = (char *)malloc(len);
			WcharToChar(path_install,path,len);
		}
	}

	// reset the cached embedded-command state for the new voice
	gVolume = 100;
	gSpeed = -1;
	gPitch = -1;
	gRange = -1;
	gEmphasis = 0;
	gSayas = 0;

	espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,100,path_install,1);
	espeak_SetVoiceByName(voice);
	espeak_SetSynthCallback(SynthCallback);
	return hr;
} /* CTTSEngObj::SetObjectToken */
  332. //
  333. //=== ISpTTSEngine Implementation ============================================
  334. //
  335. #define L(c1,c2) (c1<<8)+c2 // combine two characters into an integer
  336. static char *phoneme_names_en[] = {
  337. NULL,NULL,NULL," ",NULL,NULL,NULL,NULL,"'",",",
  338. "A:","a","V","0","aU","@","aI",
  339. "b","tS","d","D","E","3:","eI",
  340. "f","g","h","I","i:","dZ","k",
  341. "l","m","n","N","oU","OI","p",
  342. "r","s","S","t","T","U","u:",
  343. "v","w","j","z","Z",
  344. NULL
  345. };
int CTTSEngObj::WritePhonemes(SPPHONEID *phons, wchar_t *pW)
{//=========================================================
// Translate a SAPI phoneme-id string into eSpeak [[ ... ]] phoneme input.
// If pW is not NULL, the result (including its 0 terminator) is written
// there as wide characters.  Returns the length of the phoneme string,
// or 0 if the current voice's language has no translation table.
	int ph;
	int ix=2;        // phbuf already holds the opening "[["
	int skip=0;      // set when the following phoneme was already consumed (stress mark)
	int maxph = 49;  // highest phoneme id present in the table
	char *p;
	int j;
	int lang;
	char **phoneme_names;
	char phbuf[200];
	espeak_VOICE *voice;

	voice = espeak_GetCurrentVoice();
	// language code from bytes 1 and 2, e.g. "en" -> 0x656e
	// (byte 0 is assumed to be a priority byte — TODO confirm against speak_lib.h)
	lang = (voice->languages[1] << 8) + (voice->languages[2]);

	phoneme_names = phoneme_names_en;
	maxph = 0;

	// only English has a translation table so far
	if(lang == L('e','n'))
	{
		phoneme_names = phoneme_names_en;
		maxph = 49;
	}
	if(maxph == 0)
		return(0);   // unsupported language: write nothing

	strcpy(phbuf,"[[");
	while(((ph = *phons++) != 0) && (ix < (sizeof(phbuf) - 3)))
	{
		if(skip)
		{
			// this phoneme (a stress mark) was already emitted before the vowel
			skip = 0;
			continue;
		}
		if(ph > maxph)
			continue;   // id outside the table: ignore

		p = phoneme_names[phons[0]]; // look at the phoneme after this one
		if(p != NULL)
		{
			if(p[0] == '\'')
			{
				phbuf[ix++] = '\''; // primary stress, put before the vowel, not after
				skip=1;
			}
			if(p[0] == ',')
			{
				phbuf[ix++] = ','; // secondary stress
				skip=1;
			}
		}

		p = phoneme_names[ph]; // look at this phoneme
		if(p != NULL)
		{
			strcpy(&phbuf[ix],p);
			ix += strlen(p);
		}
	}
	strcpy(&phbuf[ix],"]]");
	ix += 2;

	if(pW != NULL)
	{
		// widen into the output buffer, including the 0 terminator (j<=ix)
		for(j=0; j<=ix; j++)
		{
			pW[j] = phbuf[j];
		}
	}
	return(strlen(phbuf));
}
  411. int CTTSEngObj::ProcessFragList(const SPVTEXTFRAG* pTextFragList, wchar_t *pW_start, ISpTTSEngineSite* pOutputSite, int *n_text)
  412. {//============================================================================================================================
  413. int action;
  414. int control;
  415. wchar_t *pW;
  416. const SPVSTATE *state;
  417. unsigned int ix;
  418. unsigned int len;
  419. unsigned int total=0;
  420. char cmdbuf[50];
  421. wchar_t markbuf[32];
  422. int speed;
  423. int volume;
  424. int pitch;
  425. int range;
  426. int emphasis;
  427. int sayas;
  428. unsigned int text_offset = 0;
  429. frag_count = 0;
  430. frag_ix = 0;
  431. pW = pW_start;
  432. while(pTextFragList != NULL)
  433. {
  434. action = pTextFragList->State.eAction;
  435. control = pOutputSite->GetActions();
  436. len = pTextFragList->ulTextLen;
  437. if(control & SPVES_ABORT)
  438. break;
  439. CheckActions(pOutputSite);
  440. sayas = 0;
  441. state = &pTextFragList->State;
  442. switch(action)
  443. {
  444. case SPVA_SpellOut:
  445. sayas = 0x12; // SAYAS_CHARS; // drop through to SPVA_Speak
  446. case SPVA_Speak:
  447. text_offset = pTextFragList->ulTextSrcOffset;
  448. audio_offset = audio_latest;
  449. #ifdef deleted
  450. // attempt to recognise when JAWS is spelling, it doesn't use SPVA_SpellOut
  451. if((pW != NULL) && (*n_text == 1) && ((len == 1) || ((len==2) && (pTextFragList->pTextStart[1]==' '))))
  452. {
  453. // A single text fragment with one character. Speak as a character, not a word
  454. sayas = 0x11;
  455. gSayas = 0;
  456. }
  457. #endif
  458. if(frag_count >= n_frag_offsets)
  459. {
  460. if((frag_offsets = (FRAG_OFFSET *)realloc(frag_offsets,sizeof(FRAG_OFFSET)*(frag_count+500))) != NULL)
  461. {
  462. n_frag_offsets = frag_count+500;
  463. }
  464. }
  465. // first set the volume, rate, pitch
  466. volume = (state->Volume * master_volume)/100;
  467. speed = ConvertRate(state->RateAdj);
  468. pitch = ConvertPitch(state->PitchAdj.MiddleAdj);
  469. range = ConvertRange(state->PitchAdj.RangeAdj);
  470. emphasis = state->EmphAdj;
  471. if(emphasis != 0)
  472. emphasis = 3;
  473. len = 0;
  474. if(volume != gVolume)
  475. {
  476. sprintf(&cmdbuf[len],"%c%dA",CTRL_EMBEDDED,volume);
  477. len += strlen(&cmdbuf[len]);
  478. }
  479. if(speed != gSpeed)
  480. {
  481. sprintf(&cmdbuf[len],"%c%dS",CTRL_EMBEDDED,speed);
  482. len += strlen(&cmdbuf[len]);
  483. }
  484. if(pitch != gPitch)
  485. {
  486. sprintf(&cmdbuf[len],"%c%dP",CTRL_EMBEDDED,pitch);
  487. len += strlen(&cmdbuf[len]);
  488. }
  489. if(range != gRange)
  490. {
  491. sprintf(&cmdbuf[len],"%c%dR",CTRL_EMBEDDED,range);
  492. len += strlen(&cmdbuf[len]);
  493. }
  494. if(emphasis != gEmphasis)
  495. {
  496. sprintf(&cmdbuf[len],"%c%dF",CTRL_EMBEDDED,emphasis);
  497. len += strlen(&cmdbuf[len]);
  498. }
  499. if(sayas != gSayas)
  500. {
  501. sprintf(&cmdbuf[len],"%c%dY",CTRL_EMBEDDED,sayas);
  502. len += strlen(&cmdbuf[len]);
  503. }
  504. gVolume = volume;
  505. gSpeed = speed;
  506. gPitch = pitch;
  507. gRange = range;
  508. gEmphasis = emphasis;
  509. gSayas = sayas;
  510. total += (len + pTextFragList->ulTextLen);
  511. if(pTextFragList->ulTextLen > 0)
  512. {
  513. total++;
  514. }
  515. if(pW != NULL)
  516. {
  517. for(ix=0; ix<len; ix++)
  518. {
  519. *pW++ = cmdbuf[ix];
  520. }
  521. frag_offsets[frag_count].textix = text_offset;
  522. frag_offsets[frag_count].bufix = pW - pW_start;
  523. frag_offsets[frag_count].cmdlen = len;
  524. #ifdef TEST_INPUT
  525. {
  526. FILE *f;
  527. unsigned int c;
  528. int n;
  529. char buf[10];
  530. f = fopen("C:\\espeak_text_log.txt","a");
  531. if(f != NULL)
  532. {
  533. fprintf(f,"----------\n");
  534. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  535. {
  536. c = pTextFragList->pTextStart[ix];
  537. n = utf8_out(c,buf);
  538. buf[n] = 0;
  539. fprintf(f,"%s",buf);
  540. }
  541. fprintf(f,"\n");
  542. fclose(f);
  543. }
  544. }
  545. #endif
  546. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  547. {
  548. *pW++ = pTextFragList->pTextStart[ix];
  549. }
  550. if(pTextFragList->ulTextLen > 0)
  551. {
  552. *pW++ = ' ';
  553. }
  554. }
  555. frag_count++;
  556. break;
  557. case SPVA_Bookmark:
  558. total += (2 + pTextFragList->ulTextLen);
  559. if(pW != NULL)
  560. {
  561. int index;
  562. for(ix=0; ix<pTextFragList->ulTextLen; ix++)
  563. {
  564. markbuf[ix] = (char )pTextFragList->pTextStart[ix];
  565. }
  566. markbuf[ix] = 0;
  567. if((index = AddNameData((const char *)markbuf,1)) >= 0)
  568. {
  569. sprintf(cmdbuf,"%c%dM",CTRL_EMBEDDED,index);
  570. len = strlen(cmdbuf);
  571. for(ix=0; ix<len; ix++)
  572. {
  573. *pW++ = cmdbuf[ix];
  574. }
  575. }
  576. }
  577. break;
  578. case SPVA_Pronounce:
  579. total += WritePhonemes(state->pPhoneIds, pW);
  580. if(pW != NULL)
  581. {
  582. pW += total;
  583. }
  584. break;
  585. }
  586. pTextFragList = pTextFragList->pNext;
  587. }
  588. if(pW != NULL)
  589. {
  590. *pW = 0;
  591. }
  592. *n_text = frag_count;
  593. return(total);
  594. } // end of ProcessFragList
  595. /*****************************************************************************
  596. * CTTSEngObj::Speak *
  597. *-------------------*
  598. * Description:
  599. * This is the primary method that SAPI calls to render text.
  600. *-----------------------------------------------------------------------------
  601. * Input Parameters
  602. *
  603. * pUser
  604. * Pointer to the current user profile object. This object contains
  605. * information like what languages are being used and this object
  606. * also gives access to resources like the SAPI master lexicon object.
  607. *
  608. * dwSpeakFlags
  609. * This is a set of flags used to control the behavior of the
  610. * SAPI voice object and the associated engine.
  611. *
  612. * VoiceFmtIndex
  613. * Zero based index specifying the output format that should
  614. * be used during rendering.
  615. *
  616. * pTextFragList
  617. * A linked list of text fragments to be rendered. There is
  618. * one fragement per XML state change. If the input text does
  619. * not contain any XML markup, there will only be a single fragment.
  620. *
  621. * pOutputSite
  622. * The interface back to SAPI where all output audio samples and events are written.
  623. *
  624. * Return Values
  625. * S_OK - This should be returned after successful rendering or if
  626. * rendering was interrupted because *pfContinue changed to FALSE.
  627. * E_INVALIDARG
  628. * E_OUTOFMEMORY
  629. *
  630. *****************************************************************************/
  631. STDMETHODIMP CTTSEngObj::Speak( DWORD dwSpeakFlags,
  632. REFGUID rguidFormatId,
  633. const WAVEFORMATEX * pWaveFormatEx,
  634. const SPVTEXTFRAG* pTextFragList,
  635. ISpTTSEngineSite* pOutputSite )
  636. {
  637. SPDBG_FUNC( "CTTSEngObj::Speak" );
  638. HRESULT hr = S_OK;
  639. unsigned int size;
  640. int xVolume;
  641. int xSpeed;
  642. int xPitch;
  643. int xRange;
  644. int xEmphasis;
  645. int xSayas;
  646. int punctuation;
  647. int n_text_frag=0;
  648. //--- Check args
  649. if( SP_IS_BAD_INTERFACE_PTR( pOutputSite ) ||
  650. SP_IS_BAD_READ_PTR( pTextFragList ) )
  651. {
  652. hr = E_INVALIDARG;
  653. }
  654. else
  655. {
  656. InitNamedata();
  657. //--- Init some vars
  658. m_pCurrFrag = pTextFragList;
  659. m_pNextChar = m_pCurrFrag->pTextStart;
  660. m_pEndChar = m_pNextChar + m_pCurrFrag->ulTextLen;
  661. m_ullAudioOff = 0;
  662. m_OutputSite = pOutputSite;
  663. pOutputSite->GetEventInterest(&event_interest);
  664. xVolume = gVolume;
  665. xSpeed = gSpeed;
  666. xPitch = gPitch;
  667. xRange = gRange;
  668. xEmphasis = gEmphasis;
  669. xSayas = gSayas;
  670. // find the size of the text buffer needed for this Speak() request
  671. size = ProcessFragList(pTextFragList,NULL,pOutputSite,&n_text_frag);
  672. gVolume = xVolume;
  673. gSpeed = xSpeed;
  674. gPitch = xPitch;
  675. gRange = xRange;
  676. gEmphasis = xEmphasis;
  677. gSayas = xSayas;
  678. punctuation = 0;
  679. if(dwSpeakFlags & SPF_NLP_SPEAK_PUNC)
  680. punctuation = 1;
  681. espeak_SetParameter(espeakPUNCTUATION,punctuation,0);
  682. size = (size + 50)*sizeof(wchar_t);
  683. if(size > gBufSize)
  684. {
  685. size += 1000; // some extra so we don't need to realloc() again too often
  686. TextBuf = (wchar_t *)realloc(TextBuf,size);
  687. if(TextBuf == NULL)
  688. {
  689. gBufSize=0;
  690. return(1);
  691. }
  692. gBufSize = size;
  693. }
  694. audio_latest = 0;
  695. prev_phoneme = 0;
  696. prev_phoneme_time = 0;
  697. prev_phoneme_position = 0;
  698. size = ProcessFragList(pTextFragList,TextBuf,pOutputSite,&n_text_frag);
  699. if(size > 0)
  700. {
  701. espeak_Synth(TextBuf,0,0,POS_CHARACTER,0,espeakCHARS_WCHAR | espeakKEEP_NAMEDATA | espeakPHONEMES,NULL,NULL);
  702. }
  703. }
  704. return hr;
  705. } /* CTTSEngObj::Speak */
  706. HRESULT CTTSEngObj::CheckActions( ISpTTSEngineSite* pOutputSite )
  707. {//==============================================================
  708. int control;
  709. USHORT volume;
  710. long rate;
  711. control = pOutputSite->GetActions();
  712. if(control & SPVES_VOLUME)
  713. {
  714. if(pOutputSite->GetVolume(&volume) == S_OK)
  715. {
  716. master_volume = volume;
  717. }
  718. }
  719. if(control & SPVES_RATE)
  720. {
  721. if(pOutputSite->GetRate(&rate) == S_OK)
  722. {
  723. master_rate = rate;
  724. }
  725. }
  726. return(S_OK);
  727. } // end of CTTSEngObj::CheckActions
STDMETHODIMP CTTSEngObj::GetOutputFormat( const GUID * pTargetFormatId, const WAVEFORMATEX * pTargetWaveFormatEx,
	GUID * pDesiredFormatId, WAVEFORMATEX ** ppCoMemDesiredWaveFormatEx )
{//========================================================================
// Report the audio format this engine produces: 22kHz 16-bit mono, or
// 16kHz for an mbrola voice.  Also sets the global srate used by
// SynthCallback() to convert ms positions into byte offsets
// (srate = samplerate / 50, see the srate declaration).
	SPDBG_FUNC( "CTTSEngObj::GetVoiceFormat" );
	HRESULT hr = S_OK;
	enum SPSTREAMFORMAT sample_rate = SPSF_22kHz16BitMono;

	srate = 441;   // 22050 / 50
	if(espeak_GetParameter(espeakVOICETYPE,1) == 1)
	{
		srate = 320;   // 16000 / 50
		sample_rate = SPSF_16kHz16BitMono; // an mbrola voice
	}
	hr = SpConvertStreamFormatEnum(sample_rate, pDesiredFormatId, ppCoMemDesiredWaveFormatEx);
	return hr;
} /* CTTSEngObj::GetVoiceFormat */
  743. int FAR PASCAL CompileDictionary(const char *voice, const char *path_log)
  744. {//===========================================================
  745. FILE *f_log3;
  746. char fname[120];
  747. f_log3 = fopen(path_log,"w");
  748. sprintf(fname,"%s/",path_install);
  749. espeak_SetVoiceByName(voice);
  750. espeak_CompileDictionary(fname,f_log3,0);
  751. fclose(f_log3);
  752. return(0);
  753. }