@@ -227,12 +227,12 @@ noinst_HEADERS = \
 src/ucd-tools/src/include/ucd/ucd.h
 if OPT_KLATT
-src_libespeak_ng_la_CFLAGS += -DINCLUDE_KLATT
+AM_CFLAGS += -DUSE_KLATT=1
 src_libespeak_ng_la_SOURCES += src/libespeak-ng/klatt.c
 endif
 if OPT_SPEECHPLAYER
-src_libespeak_ng_la_CFLAGS += -DINCLUDE_SPEECHPLAYER
+AM_CFLAGS += -DUSE_SPEECHPLAYER=1
 src_libespeak_ng_la_SOURCES += src/libespeak-ng/sPlayer.c
 src/speechPlayer/src/frame.cpp
 src/speechPlayer/src/speechPlayer.cpp
@@ -243,12 +243,12 @@ src_speak_ng_SOURCES = src/speak-ng.c
 endif
 if OPT_MBROLA
-src_libespeak_ng_la_CFLAGS += -DINCLUDE_MBROLA
+AM_CFLAGS += -DUSE_MBROLA=1
 src_libespeak_ng_la_SOURCES += src/libespeak-ng/mbrowrap.c
 endif
 if OPT_ASYNC
-src_libespeak_ng_la_CFLAGS += -DUSE_ASYNC
+AM_CFLAGS += -DUSE_ASYNC=1
 src_libespeak_ng_la_SOURCES += \
 src/libespeak-ng/espeak_command.c \
 src/libespeak-ng/event.c \
@@ -290,14 +290,6 @@ src_libespeak_ng_test_la_CFLAGS = \
 ${PCAUDIOLIB_CFLAGS} ${AM_CFLAGS}
 src_libespeak_ng_test_la_SOURCES = $(src_libespeak_ng_la_SOURCES)
-if OPT_KLATT
-src_libespeak_ng_test_la_CFLAGS += -DINCLUDE_KLATT
-endif
-if OPT_SPEECHPLAYER
-src_libespeak_ng_test_la_CFLAGS += -DINCLUDE_SPEECHPLAYER
-endif
 check_PROGRAMS += tests/encoding.test
 tests_encoding_test_LDADD = src/libespeak-ng.la
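The Makefile.am hunks above switch the optional features from bare -DINCLUDE_* defines to flags that always carry a numeric value (-DUSE_KLATT=1, -DUSE_MBROLA=1, and so on). That is what lets the source hunks below test the features with #if instead of #ifdef: in a conforming preprocessor an identifier that is not defined evaluates to 0 inside #if, so a disabled feature simply compiles out. A minimal sketch of the two styles, illustrative only and not code from this patch, assuming a hypothetical translation unit built with or without -DUSE_KLATT=1:

/* sketch.c -- illustrative only, not part of this patch */
#include <stdio.h>

#ifdef INCLUDE_KLATT            /* old style: presence of the macro is the switch */
#define OLD_KLATT 1
#else
#define OLD_KLATT 0
#endif

#if USE_KLATT                   /* new style: the value is the switch; undefined reads as 0 */
#define NEW_KLATT 1
#else
#define NEW_KLATT 0
#endif

int main(void)
{
	/* cc sketch.c               -> old=0 new=0
	   cc -DUSE_KLATT=1 sketch.c -> old=0 new=1 */
	printf("old=%d new=%d\n", OLD_KLATT, NEW_KLATT);
	return 0;
}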
@@ -218,7 +218,7 @@ static int OpenWavFile(char *path, int rate)
 f_wavfile = NULL;
 if (path[0] != 0) {
 if (strcmp(path, "stdout") == 0) {
-#ifdef PLATFORM_WINDOWS
+#if PLATFORM_WINDOWS
 // prevent Windows adding 0x0d before 0x0a bytes
 _setmode(_fileno(stdout), _O_BINARY);
 #endif
@@ -27,7 +27,7 @@
 #include "espeak_command.h"
-#ifdef USE_ASYNC
+#if USE_ASYNC
 static unsigned int my_current_text_id = 0;
@@ -38,7 +38,7 @@
 #include "fifo.h"
 #include "event.h"
-#ifdef USE_ASYNC
+#if USE_ASYNC
 // my_mutex: protects my_thread_is_talking,
 // my_stop_is_required, and the command fifo
@@ -38,7 +38,7 @@
 #include "common.h" // for espeak_rand
 #include "synthesize.h" // for frame_t, WGEN_DATA, STEPSIZE, N_KLATTP, echo...
 #include "voice.h" // for voice_t, N_PEAKS
-#ifdef INCLUDE_SPEECHPLAYER
+#if USE_SPEECHPLAYER
 #include "sPlayer.h"
 #endif
@@ -433,7 +433,7 @@ void KlattReset(int control)
 {
 int r_ix;
-#ifdef INCLUDE_SPEECHPLAYER
+#if USE_SPEECHPLAYER
 KlattResetSP();
 #endif
@@ -466,7 +466,7 @@ void KlattReset(int control)
 void KlattFini(void)
 {
-#ifdef INCLUDE_SPEECHPLAYER
+#if USE_SPEECHPLAYER
 KlattFiniSP();
 #endif
 }
@@ -859,7 +859,7 @@ static double klattp_inc[N_KLATTP];
 int Wavegen_Klatt(int length, int resume, frame_t *fr1, frame_t *fr2, WGEN_DATA *wdata, voice_t *wvoice)
 {
-#ifdef INCLUDE_SPEECHPLAYER
+#if USE_SPEECHPLAYER
 if(wvoice->klattv[0] == 6)
 return Wavegen_KlattSP(wdata, wvoice, length, resume, fr1, fr2);
 #endif
@@ -1078,7 +1078,7 @@ void KlattInit()
 int ix;
-#ifdef INCLUDE_SPEECHPLAYER
+#if USE_SPEECHPLAYER
 KlattInitSP();
 #endif
@@ -157,7 +157,7 @@ void SetSpeed(int control)
 speed.min_pause = 5;
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 int wpm_value = wpm;
 if (voice->speed_percent > 0)
@@ -88,7 +88,7 @@ static espeak_ng_STATUS LoadSoundFile(const char *fname, int index, espeak_ng_ER
 fclose(f);
 f = NULL;
-#ifdef HAVE_MKSTEMP
+#if HAVE_MKSTEMP
 strcpy(fname_temp, "/tmp/espeakXXXXXX");
 int fd_temp;
 if ((fd_temp = mkstemp(fname_temp)) >= 0)
@@ -33,7 +33,7 @@
 #include <unistd.h>
 #include <wchar.h>
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 #include <pcaudiolib/audio.h>
 #endif
@@ -71,7 +71,7 @@ espeak_EVENT *event_list = NULL;
 static int event_list_ix = 0;
 static int n_event_list;
 static long count_samples;
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 static struct audio_object *my_audio = NULL;
 #endif
@@ -90,7 +90,7 @@ extern int saved_parameters[N_SPEECH_PARAM]; // Parameters saved on synthesis st
 void cancel_audio(void)
 {
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
 audio_object_flush(my_audio);
 }
@@ -100,7 +100,7 @@ void cancel_audio(void)
 static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 {
 int a_wave_can_be_played = 1;
-#ifdef USE_ASYNC
+#if USE_ASYNC
 if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
 a_wave_can_be_played = fifo_is_command_enabled();
 #endif
@@ -118,7 +118,7 @@ static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 voice_samplerate = event->id.number;
 if (out_samplerate != voice_samplerate) {
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if (out_samplerate != 0) {
 // sound was previously open with a different sample rate
 audio_object_close(my_audio);
@@ -128,7 +128,7 @@ static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 #endif
 }
 #endif
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
 if (error != 0) {
 fprintf(stderr, "error: %s\n", audio_object_strerror(my_audio, error));
@@ -137,14 +137,14 @@ static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 }
 #endif
 out_samplerate = voice_samplerate;
-#ifdef USE_ASYNC
+#if USE_ASYNC
 if ((my_mode & ENOUTPUT_MODE_SYNCHRONOUS) == 0)
 event_init();
 #endif
 }
 }
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if (out_samplerate == 0) {
 int error = audio_object_open(my_audio, AUDIO_OBJECT_FORMAT_S16LE, voice_samplerate, 1);
 if (error != 0) {
@@ -156,7 +156,7 @@ static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 }
 #endif
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if (outbuf && length && a_wave_can_be_played) {
 int error = audio_object_write(my_audio, (char *)outbuf, 2*length);
 if (error != 0)
@@ -164,7 +164,7 @@ static int dispatch_audio(short *outbuf, int length, espeak_EVENT *event)
 }
 #endif
-#ifdef USE_ASYNC
+#if USE_ASYNC
 while (event && a_wave_can_be_played) {
 // TBD: some event are filtered here but some insight might be given
 // TBD: in synthesise.cpp for avoiding to create WORDs with size=0.
@@ -218,7 +218,7 @@ static int create_events(short *outbuf, int length, espeak_EVENT *event_list)
 return finished;
 }
-#ifdef USE_ASYNC
+#if USE_ASYNC
 int sync_espeak_terminated_msg(uint32_t unique_identifier, void *user_data)
 {
@@ -266,12 +266,12 @@ static int check_data_path(const char *path, int allow_directory)
 ESPEAK_NG_API espeak_ng_STATUS espeak_ng_InitializeOutput(espeak_ng_OUTPUT_MODE output_mode, int buffer_length, const char *device)
 {
-(void)device; // unused if HAVE_PCAUDIOLIB_AUDIO_H is not defined
+(void)device; // unused if USE_LIBPCAUDIO is not defined
 my_mode = output_mode;
 out_samplerate = 0;
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if (((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) && (my_audio == NULL))
 my_audio = create_audio_device_object(device, "eSpeak", "Text-to-Speech");
 #endif
@@ -308,7 +308,7 @@ ESPEAK_NG_API void espeak_ng_InitializePath(const char *path)
 if (check_data_path(path, 1))
 return;
-#ifdef PLATFORM_WINDOWS
+#if PLATFORM_WINDOWS
 HKEY RegKey;
 unsigned long size;
 unsigned long var_type;
@@ -396,7 +396,7 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Initialize(espeak_ng_ERROR_CONTEXT *con
 SetParameter(espeakPUNCTUATION, option_punctuation, 0);
 SetParameter(espeakWORDGAP, 0, 0);
-#ifdef USE_ASYNC
+#if USE_ASYNC
 fifo_init();
 #endif
@@ -571,7 +571,7 @@ espeak_ng_STATUS sync_espeak_Synth(unsigned int unique_identifier, const void *t
 end_character_position = end_position;
 espeak_ng_STATUS aStatus = Synthesize(unique_identifier, text, flags);
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
 int error = (aStatus == ENS_SPEECH_STOPPED)
 ? audio_object_flush(my_audio)
@@ -647,7 +647,7 @@ void sync_espeak_SetPunctuationList(const wchar_t *punctlist)
 ESPEAK_API void espeak_SetSynthCallback(t_espeak_callback *SynthCallback)
 {
 synth_callback = SynthCallback;
-#ifdef USE_ASYNC
+#if USE_ASYNC
 event_set_callback(synth_callback);
 #endif
 }
@@ -670,7 +670,7 @@ espeak_ng_Synthesize(const void *text, size_t size,
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
 return sync_espeak_Synth(0, text, position, position_type, end_position, flags, user_data);
-#ifdef USE_ASYNC
+#if USE_ASYNC
 // Create the text command
 t_espeak_command *c1 = create_espeak_text(text, size, position, position_type, end_position, flags, user_data);
 if (c1) {
@@ -719,7 +719,7 @@ espeak_ng_SynthesizeMark(const void *text,
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
 return sync_espeak_Synth_Mark(0, text, index_mark, end_position, flags, user_data);
-#ifdef USE_ASYNC
+#if USE_ASYNC
 // Create the mark command
 t_espeak_command *c1 = create_espeak_mark(text, size, index_mark, end_position,
 flags, user_data);
@@ -756,7 +756,7 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakKeyName(const char *key_name)
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
 return sync_espeak_Key(key_name);
-#ifdef USE_ASYNC
+#if USE_ASYNC
 t_espeak_command *c = create_espeak_key(key_name, NULL);
 espeak_ng_STATUS status = fifo_add_command(c);
 if (status != ENS_OK)
@@ -771,7 +771,7 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SpeakCharacter(wchar_t character)
 {
 // is there a system resource of character names per language?
-#ifdef USE_ASYNC
+#if USE_ASYNC
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
 return sync_espeak_Char(character);
@@ -795,7 +795,7 @@ ESPEAK_API int espeak_GetParameter(espeak_PARAMETER parameter, int current)
 ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetParameter(espeak_PARAMETER parameter, int value, int relative)
 {
-#ifdef USE_ASYNC
+#if USE_ASYNC
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS)
 return SetParameter(parameter, value, relative);
@@ -814,7 +814,7 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_SetPunctuationList(const wchar_t *punct
 {
 // Set the list of punctuation which are spoken for "some".
-#ifdef USE_ASYNC
+#if USE_ASYNC
 if (my_mode & ENOUTPUT_MODE_SYNCHRONOUS) {
 sync_espeak_SetPunctuationList(punctlist);
 return ENS_OK;
@@ -874,12 +874,12 @@ ESPEAK_API const char *espeak_TextToPhonemes(const void **textptr, int textmode,
 ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
 {
-#ifdef USE_ASYNC
+#if USE_ASYNC
 fifo_stop();
 event_clear_all();
 #endif
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO)
 audio_object_flush(my_audio);
 #endif
@@ -893,7 +893,7 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Cancel(void)
 ESPEAK_API int espeak_IsPlaying(void)
 {
-#ifdef USE_ASYNC
+#if USE_ASYNC
 return fifo_is_busy();
 #else
 return 0;
@@ -903,7 +903,7 @@ ESPEAK_API int espeak_IsPlaying(void)
 ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
 {
 espeak_ng_STATUS berr = err;
-#ifdef USE_ASYNC
+#if USE_ASYNC
 while (espeak_IsPlaying())
 usleep(20000);
 #endif
@@ -913,14 +913,14 @@ ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Synchronize(void)
 ESPEAK_NG_API espeak_ng_STATUS espeak_ng_Terminate(void)
 {
-#ifdef USE_ASYNC
+#if USE_ASYNC
 fifo_stop();
 fifo_terminate();
 event_terminate();
 #endif
 if ((my_mode & ENOUTPUT_MODE_SPEAK_AUDIO) == ENOUTPUT_MODE_SPEAK_AUDIO) {
-#ifdef HAVE_PCAUDIOLIB_AUDIO_H
+#if USE_LIBPCAUDIO
 audio_object_close(my_audio);
 audio_object_destroy(my_audio);
 my_audio = NULL;
@@ -54,14 +54,14 @@ extern "C"
 #if defined(_WIN32) || defined(_WIN64) // Windows
-#define PLATFORM_WINDOWS
+#define PLATFORM_WINDOWS 1
 #define PATHSEP '\\'
 #define N_PATH_HOME_DEF 230
 #define NO_VARIADIC_MACROS
 #else
-#define PLATFORM_POSIX
+#define PLATFORM_POSIX 1
 #define PATHSEP '/'
 #define N_PATH_HOME_DEF 160
 #define USE_NANOSLEEP
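Related to the hunk above: because the header now defines the platform macros to 1 instead of leaving them empty, the #if PLATFORM_WINDOWS and #if PLATFORM_POSIX tests used in the other hunks are valid expressions on either platform; where a macro is not defined at all it evaluates to 0 inside #if. A small illustrative sketch, not code from the patch:

/* platform_sketch.c -- illustrative only */
#include <stdio.h>

#if defined(_WIN32) || defined(_WIN64)
#define PLATFORM_WINDOWS 1      /* defined to 1, so #if PLATFORM_WINDOWS is true */
#else
#define PLATFORM_POSIX 1        /* PLATFORM_WINDOWS stays undefined and reads as 0 */
#endif

int main(void)
{
#if PLATFORM_WINDOWS
	puts("Windows build");
#elif PLATFORM_POSIX
	puts("POSIX build");
#endif
	return 0;
}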
@@ -884,7 +884,7 @@ int ProcessSsmlTag(wchar_t *xml_buf, char *outbuf, int *outix, int n_outbuf, con
 int wpm = speech_parameters[espeakRATE];
 espeak_SetParameter(espeakRATE, wpm, 0);
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 if (wpm >= espeakRATE_MAXIMUM) {
 // Compensate speedup with libsonic, see function SetSpeed()
 double sonic = ((double)wpm)/espeakRATE_NORMAL;
@@ -50,7 +50,7 @@
 int mbrola_delay;
 char mbrola_name[20];
-#ifdef INCLUDE_MBROLA
+#if USE_MBROLA
 #if defined(_WIN32) || defined(_WIN64)
 #include <windows.h>
@@ -86,7 +86,7 @@ espeak_ng_STATUS LoadMbrolaTable(const char *mbrola_voice, const char *phtrans,
 return ENS_MBROLA_NOT_FOUND;
 sprintf(path, "%s/mbrola/%s", path_home, mbrola_voice);
-#ifdef PLATFORM_POSIX
+#if PLATFORM_POSIX
 // if not found, then also look in
 // usr/share/mbrola/xx, /usr/share/mbrola/xx/xx, /usr/share/mbrola/voices/xx
 if (GetFileLength(path) <= 0) {
@@ -1040,7 +1040,7 @@ void DoPhonemeMarker(int type, int char_posn, int length, char *name)
 }
 }
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 void DoSonicSpeed(int value)
 {
 // value, multiplier * 1024
@@ -439,7 +439,7 @@ int FormantTransition2(frameref_t *seq, int *n_frames, unsigned int data1, unsig
 void Write4Bytes(FILE *f, int value);
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 void DoSonicSpeed(int value);
 #endif
@@ -1176,7 +1176,7 @@ static void GetVoices(const char *path, int len_path_voices, int is_language_fil
 {
 char fname[sizeof(path_home)+100];
-#ifdef PLATFORM_WINDOWS
+#if PLATFORM_WINDOWS
 WIN32_FIND_DATAA FindFileData;
 HANDLE hFind = INVALID_HANDLE_VALUE;
@@ -37,11 +37,11 @@
 #include "synthesize.h" // for WGEN_DATA, RESONATOR, frame_t
 #include "mbrola.h" // for MbrolaFill, MbrolaReset, mbrola...
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 #include "klatt.h"
 #endif
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 #include "sonic.h"
 #endif
@@ -121,7 +121,7 @@ int wcmdq_tail = 0;
 const int embedded_default[N_EMBEDDED_VALUES] = { 0, 50, espeakRATE_NORMAL, 100, 50, 0, 0, 0, espeakRATE_NORMAL, 0, 0, 0, 0, 0, 0 };
 static int embedded_max[N_EMBEDDED_VALUES] = { 0, 0x7fff, 750, 300, 99, 99, 99, 0, 750, 0, 0, 0, 0, 4, 0 };
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 static sonicStream sonicSpeedupStream = NULL;
 static double sonicSpeed = 1.0;
 #endif
@@ -239,7 +239,7 @@ void WcmdqStop()
 wcmdq_head = 0;
 wcmdq_tail = 0;
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 if (sonicSpeedupStream != NULL) {
 sonicDestroyStream(sonicSpeedupStream);
 sonicSpeedupStream = NULL;
@@ -360,14 +360,14 @@ void WavegenInit(int rate, int wavemult_fact)
 pk_shape = pk_shape2;
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 KlattInit();
 #endif
 }
 void WavegenFini(void)
 {
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 KlattFini();
 #endif
 }
@@ -1300,7 +1300,7 @@ static int WavegenFill2()
 echo_complete -= length;
 wdata.n_mix_wavefile = 0;
 wdata.amplitude_fmt = 100;
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 KlattReset(1);
 #endif
 result = PlaySilence(length, resume);
@@ -1308,7 +1308,7 @@ static int WavegenFill2()
 case WCMD_WAVE:
 echo_complete = echo_length;
 wdata.n_mix_wavefile = 0;
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 KlattReset(1);
 #endif
 result = PlayWave(length, resume, (unsigned char *)q[2], q[3] & 0xff, q[3] >> 8);
@@ -1333,7 +1333,7 @@ static int WavegenFill2()
 echo_complete = echo_length;
 result = Wavegen(length & 0xffff, q[1] >> 16, resume, (frame_t *)q[2], (frame_t *)q[3], wvoice);
 break;
-#ifdef INCLUDE_KLATT
+#if USE_KLATT
 case WCMD_KLATT2: // as WCMD_SPECT but stop any concurrent wave file
 wdata.n_mix_wavefile = 0; // ... and drop through to WCMD_SPECT case
 case WCMD_KLATT:
@@ -1355,15 +1355,17 @@ static int WavegenFill2()
 case WCMD_EMBEDDED:
 SetEmbedded(q[1], q[2]);
 break;
+#if USE_MBROLA
 case WCMD_MBROLA_DATA:
 if (wvoice != NULL)
 result = MbrolaFill(length, resume, (general_amplitude * wvoice->voicing)/64);
 break;
+#endif
 case WCMD_FMT_AMPLITUDE:
 if ((wdata.amplitude_fmt = q[1]) == 0)
 wdata.amplitude_fmt = 100; // percentage, but value=0 means 100%
 break;
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 case WCMD_SONIC_SPEED:
 sonicSpeed = (double)q[1] / 1024;
 if (sonicSpeedupStream && (sonicSpeed <= 1.0)) {
@@ -1386,7 +1388,7 @@ static int WavegenFill2()
 return 0;
 }
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 // Speed up the audio samples with libsonic.
 static int SpeedUp(short *outbuf, int length_in, int length_out, int end_of_text)
 {
@@ -1412,7 +1414,7 @@ static int SpeedUp(short *outbuf, int length_in, int length_out, int end_of_text
 int WavegenFill(void)
 {
 int finished;
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 unsigned char *p_start;
 p_start = out_ptr;
@@ -1420,7 +1422,7 @@ int WavegenFill(void)
 finished = WavegenFill2();
-#if HAVE_SONIC_H
+#if USE_LIBSONIC
 if (sonicSpeed > 1.0) {
 int length;
 int max_length;