@@ -1397,7 +1397,6 @@ int CompileVowelTransition(int which) | |||
if(which==1) | |||
{ | |||
// instn = i_VOWELIN; | |||
len = 50 / 2; // defaults for transition into vowel | |||
rms = 25 / 2; | |||
@@ -1813,8 +1812,6 @@ static int LoadWavefile(FILE *f, const char *fname) | |||
scale_factor = (max / 127) + 1; | |||
//fprintf(f_errors," sample len=%d max=%4x shift=%d\n",length,max,scale_factor); | |||
#define MIN_FACTOR -1 // was 6, disable use of 16 bit samples | |||
if(scale_factor > MIN_FACTOR) | |||
{ | |||
@@ -1822,7 +1819,6 @@ static int LoadWavefile(FILE *f, const char *fname) | |||
} | |||
Write4Bytes(f_phdata,length); | |||
// fwrite(&length,4,1,f_phdata); | |||
fseek(f,44,SEEK_SET); | |||
while(!feof(f)) | |||
@@ -2463,9 +2459,6 @@ static int CompileSwitch(int type) | |||
return(0); | |||
} | |||
// count_VowelStart = 0; | |||
// count_VowelEnding = 0; | |||
if(type == 1) | |||
*prog_out++ = i_SWITCH_PREVVOWEL+6; | |||
if(type == 2) | |||
@@ -3708,7 +3701,6 @@ espeak_ng_STATUS espeak_ng_CompileIntonation(FILE *log) | |||
break; | |||
} | |||
} | |||
// fprintf(f_errors,"tune %s (%d)\n", new_tune.name, tune_number); | |||
if(found == 2) | |||
{ | |||
error("Duplicate tune name: '%s'",new_tune.name); |
@@ -442,7 +442,6 @@ static int compile_line(char *linebuf, char *dict_line, int *hash) | |||
phonetic = word = nullstring; | |||
p = linebuf; | |||
// while(isspace2(*p)) p++; | |||
step = 0; | |||
@@ -541,10 +540,7 @@ static int compile_line(char *linebuf, char *dict_line, int *hash) | |||
{ | |||
multiple_numeric_hyphen = 1; | |||
} | |||
// else // ??? | |||
{ | |||
flag_codes[n_flag_codes++] = BITNUM_FLAG_HYPHENATED; | |||
} | |||
flag_codes[n_flag_codes++] = BITNUM_FLAG_HYPHENATED; | |||
c = ' '; | |||
} | |||
if(isspace2(c)) | |||
@@ -640,11 +636,6 @@ static int compile_line(char *linebuf, char *dict_line, int *hash) | |||
error_need_dictionary++; | |||
fprintf(f_log,"%5d: Need to compile dictionary again\n",linenum); | |||
} | |||
{ | |||
//char decoded_phonemes[128]; | |||
//DecodePhonemes(word_phonemes,decoded_phonemes); | |||
//printf("Translator %x %s [%s] [%s]\n",translator->translator_name,word,phonetic,decoded_phonemes); | |||
} | |||
} | |||
else | |||
{ | |||
@@ -1925,7 +1916,6 @@ int CompileDictionary(const char *dsource, const char *dict_name, FILE *log, cha | |||
dsource = ""; | |||
f_log = log; | |||
//f_log = fopen("log2.txt","w"); | |||
if(f_log == NULL) | |||
f_log = stderr; | |||
@@ -22,7 +22,6 @@ void debug_enter(const char* text) | |||
gettimeofday(&tv, NULL); | |||
// fd_log = fopen(FILENAME,"a"); | |||
if (!fd_log) | |||
{ | |||
debug_init(); | |||
@@ -31,7 +30,6 @@ void debug_enter(const char* text) | |||
if (fd_log) | |||
{ | |||
fprintf(fd_log, "%03d.%03dms > ENTER %s\n",(int)(tv.tv_sec%1000), (int)(tv.tv_usec/1000), text); | |||
// fclose(fd_log); | |||
} | |||
} | |||
@@ -40,7 +38,6 @@ void debug_show(const char *format, ...) | |||
{ | |||
va_list args; | |||
va_start(args, format); | |||
// fd_log = fopen(FILENAME,"a"); | |||
if (!fd_log) | |||
{ | |||
debug_init(); | |||
@@ -48,7 +45,6 @@ void debug_show(const char *format, ...) | |||
if (fd_log) | |||
{ | |||
vfprintf(fd_log, format, args); | |||
// fclose(fd_log); | |||
} | |||
va_end(args); | |||
} | |||
@@ -59,7 +55,6 @@ void debug_time(const char* text) | |||
gettimeofday(&tv, NULL); | |||
// fd_log = fopen(FILENAME,"a"); | |||
if (!fd_log) | |||
{ | |||
debug_init(); | |||
@@ -67,7 +62,6 @@ void debug_time(const char* text) | |||
if (fd_log) | |||
{ | |||
fprintf(fd_log, "%03d.%03dms > %s\n",(int)(tv.tv_sec%1000), (int)(tv.tv_usec/1000), text); | |||
// fclose(fd_log); | |||
} | |||
} | |||
@@ -6,8 +6,6 @@ extern "C" | |||
{ | |||
#endif | |||
//#define DEBUG_ENABLED | |||
#ifdef DEBUG_ENABLED | |||
#define ENTER(text) debug_enter(text) | |||
#define SHOW(format,...) debug_show(format,__VA_ARGS__); |
@@ -40,7 +40,6 @@ extern void print_dictionary_flags(unsigned int *flags, char *buf, int buf_len); | |||
extern char *DecodeRule(const char *group_chars, int group_length, char *rule, int control); | |||
// accented characters which indicate (in some languages) the start of a separate syllable | |||
//static const unsigned short diereses_list[7] = {L'ä',L'ë',L'ï',L'ö',L'ü',L'ÿ',0}; | |||
static const unsigned short diereses_list[7] = {0xe4,0xeb,0xef,0xf6,0xfc,0xff,0}; | |||
// convert characters to an approximate 7 bit ascii equivalent | |||
@@ -729,8 +728,6 @@ const char *GetTranslatedPhonemeString(int phoneme_mode) | |||
if(c != 0) | |||
{ | |||
buf += utf8_out(c, buf); | |||
// if(separate_phonemes) | |||
// *buf++ = separate_phonemes; | |||
} | |||
} | |||
} | |||
@@ -1481,7 +1478,6 @@ void SetWordStress(Translator *tr, char *output, unsigned int *dictionary_flags, | |||
{ | |||
int wt; | |||
int max_weight = -1; | |||
// int prev_stressed; | |||
// find the heaviest syllable, excluding the final syllable | |||
for(ix = 1; ix < (vowel_count-1); ix++) | |||
@@ -1491,7 +1487,6 @@ void SetWordStress(Translator *tr, char *output, unsigned int *dictionary_flags, | |||
if((wt = syllable_weight[ix]) >= max_weight) | |||
{ | |||
max_weight = wt; | |||
// prev_stressed = stressed_syllable; | |||
stressed_syllable = ix; | |||
} | |||
} | |||
@@ -1736,9 +1731,6 @@ void SetWordStress(Translator *tr, char *output, unsigned int *dictionary_flags, | |||
if((ph = phoneme_tab[phcode]) == NULL) | |||
continue; | |||
// if(ph->type == phSTRESS) | |||
// continue; | |||
if(ph->type == phPAUSE) | |||
{ | |||
tr->prev_last_stress = 0; | |||
@@ -3189,7 +3181,6 @@ static const char *LookupDict2(Translator *tr, const char *word, const char *wor | |||
for(ix=0; ix <= skipwords; ix++) | |||
{ | |||
if(wtab[ix].flags & FLAG_EMPHASIZED2) | |||
// if(((wflags2 = wtab[ix].flags) & FLAG_EMPHASIZED2) || ((ix > 0) && (wflags2 & FLAG_EMBEDDED))) | |||
{ | |||
condition_failed = 1; | |||
} | |||
@@ -3560,10 +3551,6 @@ int LookupDictList(Translator *tr, char **wordptr, char *ph_out, unsigned int *f | |||
fprintf(f_trans,"Replace: %s %s\n",word,*wordptr); | |||
} | |||
} | |||
else | |||
{ | |||
// flags[0] &= ~FLAG_SKIPWORDS; // check lang=hu január 21.-ig (error: suffix repeated ??) | |||
} | |||
ph_out[0] = 0; | |||
return(0); | |||
@@ -3649,7 +3636,6 @@ int RemoveEnding(Translator *tr, char *word, int end_type, char *word_copy) | |||
}; | |||
static const char *add_e_additions[] = { | |||
// "c", "rs", "ir", "ur", "ath", "ns", "lu", NULL }; | |||
"c", "rs", "ir", "ur", "ath", "ns", "u", NULL | |||
}; | |||
@@ -3765,7 +3751,6 @@ int RemoveEnding(Translator *tr, char *word, int end_type, char *word_copy) | |||
if((strcmp(ending,"s")==0) || (strcmp(ending,"es")==0)) | |||
end_flags |= FLAG_SUFX_S; | |||
// if(strcmp(ending,"'s")==0) | |||
if(ending[0] == '\'') | |||
end_flags &= ~FLAG_SUFX; // don't consider 's as an added suffix | |||
@@ -29,11 +29,8 @@ | |||
#include "debug.h" | |||
static unsigned int my_current_text_id=0; | |||
//<create_espeak_text | |||
t_espeak_command* create_espeak_text(const void *text, size_t size, unsigned int position, espeak_POSITION_TYPE position_type, unsigned int end_position, unsigned int flags, void* user_data) | |||
{ | |||
ENTER("create_espeak_text"); | |||
@@ -88,9 +85,6 @@ t_espeak_command* create_espeak_text(const void *text, size_t size, unsigned int | |||
return a_command; | |||
} | |||
//> | |||
t_espeak_command* create_espeak_terminated_msg(unsigned int unique_identifier, void* user_data) | |||
{ | |||
ENTER("create_espeak_terminated_msg"); | |||
@@ -128,10 +122,6 @@ t_espeak_command* create_espeak_terminated_msg(unsigned int unique_identifier, v | |||
} | |||
//<create_espeak_mark | |||
t_espeak_command* create_espeak_mark(const void *text, size_t size, const char *index_mark, unsigned int end_position, unsigned int flags, void* user_data) | |||
{ | |||
ENTER("create_espeak_mark"); | |||
@@ -189,8 +179,6 @@ t_espeak_command* create_espeak_mark(const void *text, size_t size, const char * | |||
return a_command; | |||
} | |||
//> | |||
//< create_espeak_key, create_espeak_char | |||
t_espeak_command* create_espeak_key(const char *key_name, void *user_data) | |||
{ | |||
@@ -257,9 +245,6 @@ t_espeak_command* create_espeak_char(wchar_t character, void* user_data) | |||
return a_command; | |||
} | |||
//> | |||
//< create_espeak_parameter | |||
t_espeak_command* create_espeak_parameter(espeak_PARAMETER parameter, int value, int relative) | |||
{ | |||
ENTER("create_espeak_parameter"); | |||
@@ -294,14 +279,10 @@ t_espeak_command* create_espeak_parameter(espeak_PARAMETER parameter, int value, | |||
return a_command; | |||
} | |||
//> | |||
//< create_espeak_punctuation_list | |||
t_espeak_command* create_espeak_punctuation_list(const wchar_t *punctlist) | |||
{ | |||
ENTER("create_espeak_punctuation_list"); | |||
int a_error=1; | |||
// wchar_t *a_list = NULL; | |||
t_espeak_command* a_command = (t_espeak_command*)malloc(sizeof(t_espeak_command)); | |||
if (!punctlist || !a_command) | |||
@@ -336,9 +317,6 @@ t_espeak_command* create_espeak_punctuation_list(const wchar_t *punctlist) | |||
return a_command; | |||
} | |||
//> | |||
//< create_espeak_voice_name, create_espeak_voice_spec | |||
t_espeak_command* create_espeak_voice_name(const char *name) | |||
{ | |||
ENTER("create_espeak_voice_name"); | |||
@@ -421,8 +399,6 @@ t_espeak_command* create_espeak_voice_spec(espeak_VOICE *voice) | |||
return a_command; | |||
} | |||
//> | |||
//< delete_espeak_command | |||
int delete_espeak_command( t_espeak_command* the_command) | |||
{ | |||
ENTER("delete_espeak_command"); | |||
@@ -522,8 +498,7 @@ int delete_espeak_command( t_espeak_command* the_command) | |||
} | |||
return a_status; | |||
} | |||
//> | |||
//< process_espeak_command | |||
void process_espeak_command( t_espeak_command* the_command) | |||
{ | |||
ENTER("process_espeak_command"); | |||
@@ -612,9 +587,6 @@ void process_espeak_command( t_espeak_command* the_command) | |||
} | |||
} | |||
//> | |||
//< process_espeak_command | |||
void display_espeak_command( t_espeak_command* the_command) | |||
{ | |||
ENTER("display_espeak_command"); | |||
@@ -702,4 +674,3 @@ void display_espeak_command( t_espeak_command* the_command) | |||
} | |||
#endif | |||
} | |||
//> |
@@ -20,8 +20,6 @@ | |||
// This source file is only used for asynchronous modes
//<includes | |||
#ifndef PLATFORM_WINDOWS | |||
#include <unistd.h> | |||
#endif | |||
@@ -39,9 +37,6 @@ | |||
#include "event.h" | |||
#include "wave.h" | |||
#include "debug.h" | |||
//> | |||
//<decls and function prototypes | |||
// my_mutex: protects my_thread_is_talking, | |||
static pthread_mutex_t my_mutex; | |||
@@ -75,9 +70,6 @@ static void* pop(); | |||
static void init(); | |||
static void* polling_thread(void*); | |||
//> | |||
//<event_init | |||
void event_set_callback(t_espeak_callback* SynthCallback) | |||
{ | |||
my_callback = SynthCallback; | |||
@@ -110,8 +102,7 @@ void event_init(void) | |||
assert(thread_inited); | |||
pthread_attr_destroy(&a_attrib); | |||
} | |||
//> | |||
//<event_display | |||
static void event_display(espeak_EVENT* event) | |||
{ | |||
ENTER("event_display"); | |||
@@ -147,8 +138,6 @@ ENTER("event_display"); | |||
} | |||
#endif | |||
} | |||
//> | |||
//<event_copy | |||
static espeak_EVENT* event_copy (espeak_EVENT* event) | |||
{ | |||
@@ -184,9 +173,6 @@ static espeak_EVENT* event_copy (espeak_EVENT* event) | |||
return a_event; | |||
} | |||
//> | |||
//<event_notify | |||
// Call the user supplied callback | |||
// | |||
// Note: the current sequence is: | |||
@@ -223,7 +209,6 @@ ENTER("event_notify"); | |||
case espeakEVENT_END: | |||
case espeakEVENT_PHONEME: | |||
{ | |||
// jonsd - I'm not sure what this is for. gilles says it's for when Gnome Speech reads a file of blank lines | |||
if (a_old_uid != event->unique_identifier) | |||
{ | |||
espeak_EVENT_TYPE a_new_type = events[0].type; | |||
@@ -244,8 +229,6 @@ ENTER("event_notify"); | |||
} | |||
} | |||
} | |||
//> | |||
//<event_delete | |||
static int event_delete(espeak_EVENT* event) | |||
{ | |||
@@ -280,9 +263,6 @@ ENTER("event_delete"); | |||
return 1; | |||
} | |||
//> | |||
//<event_declare | |||
espeak_ERROR event_declare (espeak_EVENT* event) | |||
{ | |||
ENTER("event_declare"); | |||
@@ -310,23 +290,8 @@ ENTER("event_declare"); | |||
a_status = pthread_mutex_unlock(&my_mutex); | |||
} | |||
// TBD: remove the comment | |||
// reminder: code in comment. | |||
// This wait can lead to an underrun | |||
// | |||
// if (!a_status && !my_event_is_running && (a_error == EE_OK)) | |||
// { | |||
// // quit when command is actually started | |||
// // (for possible forthcoming 'end of command' checks) | |||
SHOW_TIME("event_declare > post my_sem_start_is_required\n"); | |||
sem_post(&my_sem_start_is_required); | |||
// int val=1; | |||
// while (val) | |||
// { | |||
// usleep(50000); // TBD: event? | |||
// sem_getvalue(&my_sem_start_is_required, &val); | |||
// } | |||
// } | |||
if (a_status != 0) | |||
{ | |||
@@ -336,9 +301,6 @@ ENTER("event_declare"); | |||
return a_error; | |||
} | |||
//> | |||
//<event_clear_all | |||
espeak_ERROR event_clear_all () | |||
{ | |||
ENTER("event_clear_all"); | |||
@@ -384,9 +346,6 @@ espeak_ERROR event_clear_all () | |||
return EE_OK; | |||
} | |||
//> | |||
//<sleep_until_timeout_or_stop_request | |||
static int sleep_until_timeout_or_stop_request(uint32_t time_in_ms) | |||
{ | |||
ENTER("sleep_until_timeout_or_stop_request"); | |||
@@ -428,8 +387,6 @@ ENTER("sleep_until_timeout_or_stop_request"); | |||
return a_stop_is_required; | |||
} | |||
//> | |||
//<get_remaining_time | |||
// Returns the time interval required to reach the sample.
// If the stream is opened but the audio samples are not played, | |||
// a timeout is started. | |||
@@ -472,9 +429,6 @@ ENTER("get_remaining_time"); | |||
return err; | |||
} | |||
//> | |||
//<polling_thread | |||
static void* polling_thread(void*p) | |||
{ | |||
ENTER("polling_thread"); | |||
@@ -628,10 +582,8 @@ ENTER("polling_thread"); | |||
return NULL; | |||
} | |||
//> | |||
//<push, pop, init | |||
enum {MAX_NODE_COUNTER=1000}; | |||
// return 1 if ok, 0 otherwise | |||
static espeak_ERROR push(void* the_data) | |||
{ | |||
ENTER("event > push"); | |||
@@ -712,8 +664,6 @@ static void init() | |||
node_counter = 0; | |||
} | |||
//> | |||
//<event_terminate | |||
void event_terminate() | |||
{ | |||
ENTER("event_terminate"); | |||
@@ -730,4 +680,3 @@ ENTER("event_terminate"); | |||
thread_inited = 0; | |||
} | |||
} | |||
//> |
@@ -20,8 +20,6 @@ | |||
// This source file is only used for asynchronous modes
//<includes | |||
#ifndef PLATFORM_WINDOWS | |||
#include <unistd.h> | |||
#endif | |||
@@ -41,9 +39,6 @@ | |||
#include "debug.h" | |||
//> | |||
//<decls and function prototypes | |||
// my_mutex: protects my_thread_is_talking, | |||
// my_stop_is_required, and the command fifo | |||
static pthread_mutex_t my_mutex; | |||
@@ -68,8 +63,6 @@ enum {MAX_NODE_COUNTER=400, | |||
MAX_INACTIVITY_CHECK=2 | |||
}; | |||
//> | |||
//<fifo_init | |||
void fifo_init() | |||
{ | |||
ENTER("fifo_init"); | |||
@@ -102,8 +95,6 @@ void fifo_init() | |||
} | |||
SHOW_TIME("fifo > get my_sem_stop_is_acknowledged\n"); | |||
} | |||
//> | |||
//<fifo_add_command | |||
espeak_ERROR fifo_add_command (t_espeak_command* the_command) | |||
{ | |||
@@ -143,9 +134,6 @@ espeak_ERROR fifo_add_command (t_espeak_command* the_command) | |||
return a_error; | |||
} | |||
//> | |||
//<fifo_add_commands | |||
espeak_ERROR fifo_add_commands (t_espeak_command* command1, t_espeak_command* command2) | |||
{ | |||
ENTER("fifo_add_command"); | |||
@@ -194,9 +182,6 @@ espeak_ERROR fifo_add_commands (t_espeak_command* command1, t_espeak_command* co | |||
return a_error; | |||
} | |||
//> | |||
//<fifo_stop | |||
espeak_ERROR fifo_stop () | |||
{ | |||
ENTER("fifo_stop"); | |||
@@ -239,37 +224,12 @@ espeak_ERROR fifo_stop () | |||
return EE_OK; | |||
} | |||
//> | |||
//<fifo_is_speaking | |||
int fifo_is_busy () | |||
{ | |||
// ENTER("isSpeaking"); | |||
// int aResult = (int) (my_command_is_running || WaveIsPlaying()); | |||
SHOW("fifo_is_busy > aResult = %d\n",my_command_is_running); | |||
return my_command_is_running; | |||
} | |||
// int pause () | |||
// { | |||
// ENTER("pause"); | |||
// // TBD | |||
// // if (espeakPause (espeakHandle, 1)) | |||
// return true; | |||
// } | |||
// int resume () | |||
// { | |||
// ENTER("resume"); | |||
// // TBD | |||
// // if (espeakPause (espeakHandle, 0)) | |||
// return true; | |||
// } | |||
//> | |||
//<sleep_until_start_request_or_inactivity | |||
static int sleep_until_start_request_or_inactivity() | |||
{ | |||
SHOW_TIME("fifo > sleep_until_start_request_or_inactivity > ENTER"); | |||
@@ -330,9 +290,6 @@ static int sleep_until_start_request_or_inactivity() | |||
return a_start_is_required; | |||
} | |||
//> | |||
//<close_stream | |||
static void close_stream() | |||
{ | |||
SHOW_TIME("fifo > close_stream > ENTER\n"); | |||
@@ -373,9 +330,6 @@ static void close_stream() | |||
SHOW_TIME("fifo > close_stream > LEAVE\n"); | |||
} | |||
//> | |||
//<say_thread | |||
static void* say_thread(void*p) | |||
{ | |||
ENTER("say_thread"); | |||
@@ -484,8 +438,6 @@ int fifo_is_command_enabled() | |||
return (0 == my_stop_is_required); | |||
} | |||
//> | |||
//<fifo | |||
typedef struct t_node | |||
{ | |||
t_espeak_command* data; | |||
@@ -494,7 +446,7 @@ typedef struct t_node | |||
static node* head=NULL; | |||
static node* tail=NULL; | |||
// return 1 if ok, 0 otherwise | |||
static espeak_ERROR push(t_espeak_command* the_command) | |||
{ | |||
ENTER("fifo > push"); | |||
@@ -569,10 +521,8 @@ static t_espeak_command* pop() | |||
return the_command; | |||
} | |||
static void init(int process_parameters) | |||
{ | |||
// Changed by Tyler Spivey 30.Nov.2011 | |||
t_espeak_command *c = NULL; | |||
ENTER("fifo > init"); | |||
c = pop(); | |||
@@ -587,9 +537,6 @@ static void init(int process_parameters) | |||
node_counter = 0; | |||
} | |||
//> | |||
//<fifo_init | |||
void fifo_terminate() | |||
{ | |||
ENTER("fifo_terminate"); | |||
@@ -602,4 +549,3 @@ void fifo_terminate() | |||
init(0); // purge fifo | |||
} | |||
//> |
@@ -651,9 +651,6 @@ static int calc_pitch_segment(int ix, int end_ix, TONE_HEAD *th, TONE_NUCLEUS *t | |||
syl = &syllable_tab[ix]; | |||
stress = syl->stress; | |||
// if(stress == PRIMARY_MARKED) | |||
// initial = 1; // reset the intonation pattern | |||
if(initial || (stress >= min_stress)) | |||
{ | |||
// a primary stress | |||
@@ -811,11 +808,6 @@ static int calc_pitches2(int start, int end, int tune_number) | |||
/* tonic syllable */ | |||
/******************/ | |||
// if(tn->flags & T_EMPH) | |||
// { | |||
// syllable_tab[ix].flags |= SYL_EMPHASIS; | |||
// } | |||
if(number_tail == 0) | |||
{ | |||
tone_pitch_env = tune->nucleus0_env; | |||
@@ -879,7 +871,6 @@ static int calc_pitches(int control, int start, int end, int tune_number) | |||
tone_posn = tone_posn2; // put tone on the penultimate stressed word | |||
} | |||
ix = calc_pitch_segment(ix,tone_posn, th, tn, PRIMARY, continuing); | |||
// ix = SetBodyIntonation(&tunes[0], ix, tone_posn, 0); | |||
if(no_tonic) | |||
return(0); | |||
@@ -936,7 +927,6 @@ static void CalcPitches_Tone(Translator *tr, int clause_tone) | |||
PHONEME_TAB *tph; | |||
PHONEME_TAB *prev_tph; // forget across word boundary | |||
PHONEME_TAB *prevw_tph; // remember across word boundary | |||
// PHONEME_TAB *prev2_tph; // 2 tones previous | |||
PHONEME_LIST *prev_p; | |||
int pitch_adjust = 0; // pitch gradient through the clause - initial value
@@ -1059,7 +1049,6 @@ static void CalcPitches_Tone(Translator *tr, int clause_tone) | |||
} | |||
prev_p = p; | |||
// prev2_tph = prevw_tph; | |||
prevw_tph = prev_tph = tph; | |||
pause = 0; | |||
} |
@@ -36,7 +36,7 @@ | |||
#include "synthesize.h" | |||
#include "voice.h" | |||
extern unsigned char *out_ptr; // **JSD | |||
extern unsigned char *out_ptr; | |||
extern unsigned char *out_start; | |||
extern unsigned char *out_end; | |||
extern WGEN_DATA wdata; | |||
@@ -185,9 +185,6 @@ static void flutter(klatt_frame_ptr frame) | |||
fla = (double) kt_globals.f0_flutter / 50; | |||
flb = (double) kt_globals.original_f0 / 100; | |||
// flc = sin(2*PI*12.7*time_count); | |||
// fld = sin(2*PI*7.1*time_count); | |||
// fle = sin(2*PI*4.7*time_count); | |||
flc = sin(PI*12.7*time_count); // because we are calling flutter() more frequently, every 2.9mS | |||
fld = sin(PI*7.1*time_count); | |||
fle = sin(PI*4.7*time_count); | |||
@@ -767,15 +764,12 @@ static void pitch_synch_par_reset(klatt_frame_ptr frame) | |||
if (kt_globals.nopen >= (kt_globals.T0-1)) | |||
{ | |||
// printf("Warning: glottal open period cannot exceed T0, truncated\n"); | |||
kt_globals.nopen = kt_globals.T0 - 2; | |||
} | |||
if (kt_globals.nopen < 40) | |||
{ | |||
/* F0 max = 1000 Hz */ | |||
// printf("Warning: minimum glottal open period is 10 samples.\n"); | |||
// printf("truncated, nopen = %d\n",kt_globals.nopen); | |||
kt_globals.nopen = 40; | |||
} | |||
@@ -805,7 +799,6 @@ static void pitch_synch_par_reset(klatt_frame_ptr frame) | |||
temp = kt_globals.T0 - kt_globals.nopen; | |||
if (frame->Kskew > temp) | |||
{ | |||
// printf("Kskew duration=%d > glottal closed period=%d, truncate\n", frame->Kskew, kt_globals.T0 - kt_globals.nopen); | |||
frame->Kskew = temp; | |||
} | |||
if (skew >= 0) | |||
@@ -895,9 +888,6 @@ static void setzeroabc(long int f, long int bw, resonator_ptr rp) | |||
f = -f; | |||
//NOTE, changes made 30.09.2011 for Reece Dunn <[email protected]> | |||
// fix a sound spike when f=0 | |||
/* First compute ordinary resonator coefficients */ | |||
/* Let r = exp(-pi bw t) */ | |||
arg = kt_globals.minus_pi_t * bw; | |||
@@ -1220,10 +1210,6 @@ if(option_log_frames) | |||
klattp1[ix] = klattp[ix] = fr1->klattp[ix]; | |||
klattp_inc[ix] = (double)((fr2->klattp[ix] - klattp[ix]) * STEPSIZE)/length; | |||
} | |||
// get klatt parameter adjustments for the voice | |||
// if((ix>0) && (ix < KLATT_AVp)) | |||
// klattp1[ix] = klattp[ix] = (klattp[ix] + wvoice->klattv[ix]); | |||
} | |||
nsamples = length; | |||
@@ -1300,12 +1286,6 @@ void KlattInit() | |||
int ix; | |||
for(ix=0; ix<256; ix++) | |||
{ | |||
// TEST: Overwrite natural_samples2 | |||
// sawtooth wave | |||
// natural_samples2[ix] = (128-ix) * 20; | |||
} | |||
sample_count=0; | |||
kt_globals.synthesis_model = CASCADE_PARALLEL; |
@@ -295,7 +295,6 @@ static const unsigned short letter_accents_0e0[] = { | |||
CAPITAL, | |||
LETTER('z',M_CARON,0), | |||
LETTER('s',M_NAME,0), // long-s // U+17f | |||
// LETTER('b',M_STROKE,0), | |||
}; | |||
@@ -316,7 +315,7 @@ static const unsigned short letter_accents_250[] = { | |||
0, // open-e | |||
LETTER(L_OPEN_E,M_REVERSED,0), | |||
LETTER(L_OPEN_E,M_HOOK,M_REVERSED), | |||
0,//LETTER(L_OPEN_E,M_CLOSED,M_REVERSED), | |||
0, | |||
LETTER('j',M_BAR,0), | |||
LETTER('g',M_IMPLOSIVE,0), // U+260 | |||
LETTER('g',0,0), | |||
@@ -325,7 +324,7 @@ static const unsigned short letter_accents_250[] = { | |||
0, // ramshorn | |||
LETTER('h',M_TURNED,0), | |||
LETTER('h',M_HOOK,0), | |||
0,//LETTER(L_HENG,M_HOOK,0), | |||
0, | |||
LETTER('i',M_BAR,0), // U+268 | |||
LETTER(L_IOTA,0,0), | |||
LETTER('i',M_SMALLCAP,0), | |||
@@ -334,19 +333,19 @@ static const unsigned short letter_accents_250[] = { | |||
LETTER('l',M_RETROFLEX,0), | |||
LIGATURE('l','z',0), | |||
LETTER('m',M_TURNED,0), | |||
0,//LETTER('m',M_TURNED,M_LEG), // U+270 | |||
0, | |||
LETTER('m',M_HOOK,0), | |||
0,//LETTER('n',M_LEFTHOOK,0), | |||
0, | |||
LETTER('n',M_RETROFLEX,0), | |||
LETTER('n',M_SMALLCAP,0), | |||
LETTER('o',M_BAR,0), | |||
LIGATURE('o','e',M_SMALLCAP), | |||
0,//LETTER(L_OMEGA,M_CLOSED,0), | |||
0, | |||
LETTER(L_PHI,0,0), // U+278 | |||
LETTER('r',M_TURNED,0), | |||
LETTER(L_RLONG,M_TURNED,0), | |||
LETTER('r',M_RETROFLEX,M_TURNED), | |||
0,//LETTER('r',M_LEG,0), | |||
0, | |||
LETTER('r',M_RETROFLEX,0), | |||
0, // r-tap | |||
LETTER(L_RTAP,M_REVERSED,0), | |||
@@ -354,7 +353,7 @@ static const unsigned short letter_accents_250[] = { | |||
LETTER('r',M_TURNED,M_SMALLCAP), | |||
LETTER('s',M_RETROFLEX,0), | |||
0, // esh | |||
LETTER('j',M_HOOK,0), //LETTER('j',M_HOOK,M_BAR), | |||
LETTER('j',M_HOOK,0), | |||
LETTER(L_ESH,M_REVERSED,0), | |||
LETTER(L_ESH,M_CURL,0), | |||
LETTER('t',M_TURNED,0), | |||
@@ -373,10 +372,10 @@ static const unsigned short letter_accents_250[] = { | |||
0, // glottal stop | |||
LETTER(L_GLOTTAL,M_REVERSED,0), | |||
LETTER(L_GLOTTAL,M_TURNED,0), | |||
0,//LETTER('c',M_LONG,0), | |||
0, | |||
0, // bilabial click // U+298 | |||
LETTER('b',M_SMALLCAP,0), | |||
0,//LETTER(L_OPEN_E,M_CLOSED,0), | |||
0, | |||
LETTER('g',M_IMPLOSIVE,M_SMALLCAP), | |||
LETTER('h',M_SMALLCAP,0), | |||
LETTER('j',M_CURL,0), | |||
@@ -473,11 +472,7 @@ void LookupAccentedLetter(Translator *tr, unsigned int letter, char *ph_buf) | |||
{ | |||
if(accent2 != 0) | |||
{ | |||
if((flags2 = Lookup(tr, accents_tab[accent2].name, ph_accent2)) == 0) | |||
{ | |||
// break; | |||
} | |||
flags2 = Lookup(tr, accents_tab[accent2].name, ph_accent2); | |||
if(flags2 & FLAG_ACCENT_BEFORE) | |||
{ | |||
strcpy(ph_buf,ph_accent2); | |||
@@ -946,9 +941,6 @@ int TranslateLetter(Translator *tr, char *word, char *phonemes, int control) | |||
speak_letter_number = 0; | |||
} | |||
// if((ph_alphabet[0] != 0) && speak_letter_number) | |||
// ph_buf[0] = 0; // don't speak "letter" if we speak alphabet name | |||
if(speak_letter_number) | |||
{ | |||
if(al_offset == 0x2800) | |||
@@ -1814,7 +1806,6 @@ static int LookupNum3(Translator *tr, int value, char *ph_out, int suppress_null | |||
if(((tr->langopts.numbers & NUM_1900) != 0) && (hundreds == 19)) | |||
{ | |||
// speak numbers such as 1984 as years: nineteen-eighty-four | |||
// ph_100[0] = 0; // don't say "hundred", we also need to suppress "and"
} | |||
else if(hundreds >= 10) | |||
{ | |||
@@ -2101,10 +2092,7 @@ static int TranslateNumber_1(Translator *tr, char *word, char *ph_out, unsigned | |||
if(prev_thousands || (word[0] != '0')) | |||
{ | |||
// don't check for ordinal if the number has a leading zero | |||
if((ordinal = CheckDotOrdinal(tr, word, &word[ix], wtab, 0)) != 0) | |||
{ | |||
// dot_ordinal = 1; | |||
} | |||
ordinal = CheckDotOrdinal(tr, word, &word[ix], wtab, 0); | |||
} | |||
if((word[ix] == '.') && !IsDigit09(word[ix+1]) && !IsDigit09(word[ix+2]) && !(wtab[1].flags & FLAG_NOSPACE)) | |||
@@ -2241,7 +2229,6 @@ static int TranslateNumber_1(Translator *tr, char *word, char *ph_out, unsigned | |||
if(thousands_inc > 0) | |||
{ | |||
if(thousandplex > 0) | |||
// if((thousandplex > 0) && (value < 1000)) | |||
{ | |||
if((suppress_null == 0) && (LookupThousands(tr,value,thousandplex, thousands_exact, ph_append))) | |||
{ | |||
@@ -2285,7 +2272,6 @@ static int TranslateNumber_1(Translator *tr, char *word, char *ph_out, unsigned | |||
} | |||
} | |||
// if((buf_digit_lookup[0] == 0) && (*p != '0') && (dot_ordinal==0)) | |||
if((buf_digit_lookup[0] == 0) && (*p != '0')) | |||
{ | |||
// LANG=hu ? | |||
@@ -2443,7 +2429,6 @@ static int TranslateNumber_1(Translator *tr, char *word, char *ph_out, unsigned | |||
utf8_in(&next_char,p); | |||
if(!iswalpha2(next_char) && (thousands_exact==0)) | |||
// if(!iswalpha2(next_char) && !((wtab[thousandplex].flags & FLAG_HYPHEN_AFTER) && (thousands_exact != 0))) | |||
strcat(ph_out,str_pause); // don't add pause for 100s, 6th, etc. | |||
} | |||
@@ -591,14 +591,6 @@ void MakePhonemeList(Translator *tr, int post_pause, int start_sentence) | |||
insert_ph = phdata.pd_param[pd_APPENDPHONEME]; | |||
} | |||
if(ph->phflags & phVOICED) | |||
{ | |||
// check that a voiced consonant is preceded or followed by a vowel or liquid | |||
// and if not, add a short schwa | |||
// not yet implemented | |||
} | |||
if(deleted == 0) | |||
{ | |||
phlist[ix].ph = ph; | |||
@@ -627,7 +619,6 @@ void MakePhonemeList(Translator *tr, int post_pause, int start_sentence) | |||
phlist[ix].newword = 0; | |||
} | |||
// phlist[ix].length = ph->std_length; | |||
phlist[ix].length = phdata.pd_param[i_SET_LENGTH]*2; | |||
if((ph->code == phonPAUSE_LONG) && (option_wordgap > 0) && (plist3[1].sourceix != 0)) | |||
{ |
@@ -64,10 +64,6 @@ static int sayas_mode; | |||
static int sayas_start; | |||
static int ssml_ignore_l_angle = 0; | |||
// alter tone for announcing punctuation or capitals
//static const char *tone_punct_on = "\0016T"; // add reverberation, lower pitch | |||
//static const char *tone_punct_off = "\001T\001P"; | |||
// punctuation symbols that can end a clause
static const unsigned short punct_chars[] = {',','.','?','!',':',';', | |||
0x00a1, // inverted exclamation | |||
@@ -906,7 +902,7 @@ static int LoadSoundFile(const char *fname, int index) | |||
f = fopen(fname,"rb"); | |||
if(f == NULL) | |||
{ | |||
// fprintf(stderr,"Can't read temp file: %s\n",fname); | |||
fprintf(stderr,"Can't read temp file: %s\n",fname); | |||
return(3); | |||
} | |||
} | |||
@@ -1039,7 +1035,6 @@ static int AnnouncePunctuation(Translator *tr, int c1, int *c2_ptr, char *output | |||
if(punct_count==1) | |||
{ | |||
// sprintf(buf,"%s %s %s",tone_punct_on,punctname,tone_punct_off); | |||
sprintf(buf," %s",punctname); // we need the space before punctname, to ensure it doesn't merge with the previous word (eg. "2.-a") | |||
} | |||
else | |||
@@ -2702,7 +2697,6 @@ if(option_ssml) parag=1; | |||
if(!iswspace(c1)) | |||
{ | |||
if(!IsAlpha(c1) || !iswlower2(c1)) | |||
// if(iswdigit(c1) || (IsAlpha(c1) && !iswlower2(c1))) | |||
{ | |||
UngetC(c2); | |||
ungot_char2 = c1; | |||
@@ -2742,7 +2736,6 @@ if(option_ssml) parag=1; | |||
} | |||
if((iswspace(c2) || (punct_data & 0x8000) || IsBracket(c2) || (c2=='?') || Eof() || (c2 == ctrl_embedded))) // don't check for '-' because it prevents recognizing ':-)' | |||
// if((iswspace(c2) || (punct_data & 0x8000) || IsBracket(c2) || (c2=='?') || (c2=='-') || Eof())) | |||
{ | |||
// note: (c2='?') is for when a smart-quote has been replaced by '?' | |||
is_end_clause = 1; | |||
@@ -2832,7 +2825,6 @@ if(option_ssml) parag=1; | |||
if(iswlower2(c_next)) | |||
{ | |||
// next word has no capital letter, this dot is probably from an abbreviation | |||
// c1 = ' '; | |||
is_end_clause = 0; | |||
} | |||
if(any_alnum==0) | |||
@@ -2912,7 +2904,7 @@ if(option_ssml) parag=1; | |||
if(c1 == 0xe000 + '<') c1 = '<'; | |||
ix += utf8_out(c1,&buf[ix]); // buf[ix++] = c1; | |||
ix += utf8_out(c1,&buf[ix]); | |||
if(!iswspace(c1) && !IsBracket(c1)) | |||
{ | |||
charix[ix] = count_characters - clause_start_char; |
@@ -196,16 +196,6 @@ void SetSpeed(int control) | |||
return; | |||
} | |||
#ifdef TEST_SPEED | |||
if(wpm > 1000) | |||
{ | |||
// TESTING | |||
// test = wpm / 1000; | |||
wpm = wpm % 1000; | |||
} | |||
#endif | |||
if(wpm > 450) | |||
wpm = 450; | |||
@@ -283,13 +273,11 @@ speed.min_sample_len = (speed.min_sample_len * samplerate_native) / 22050; | |||
if(wpm > 430) | |||
{ | |||
speed.pause_factor = 12; | |||
// speed.clause_pause_factor = 15; | |||
} | |||
else | |||
if(wpm > 400) | |||
{ | |||
speed.pause_factor = 13; | |||
// speed.clause_pause_factor = 15; | |||
} | |||
else | |||
if(wpm > 374) | |||
@@ -309,11 +297,6 @@ speed.min_sample_len = (speed.min_sample_len * samplerate_native) / 22050; | |||
speed.clause_pause_factor = 16; | |||
} | |||
} | |||
#ifdef TEST_SPEED | |||
//if(control==3) | |||
printf("%3d: speedf %d %d %d x=%d pause=%d %d wav=%d lenmod=%d %d\n",wpm,speed1,speed2,speed3, speed_lookup[wpm2-80], speed.pause_factor,speed.clause_pause_factor, speed.wav_factor,speed.lenmod_factor,speed.lenmod2_factor); | |||
#endif | |||
} // end of SetSpeed | |||
#else // not using sonic speed-up | |||
@@ -335,15 +318,6 @@ void SetSpeed(int control) | |||
if(control == 2) | |||
wpm = embedded_value[EMBED_S2]; | |||
#ifdef TEST_SPEED | |||
if(wpm > 1000) | |||
{ | |||
// TESTING | |||
test = wpm / 1000; | |||
wpm = wpm % 1000; | |||
} | |||
#endif | |||
if(voice->speed_percent > 0) | |||
{ | |||
wpm = (wpm * voice->speed_percent)/100; | |||
@@ -422,13 +396,11 @@ void SetSpeed(int control) | |||
if(wpm > 430) | |||
{ | |||
speed.pause_factor = 12; | |||
// speed.clause_pause_factor = 15; | |||
} | |||
else | |||
if(wpm > 400) | |||
{ | |||
speed.pause_factor = 13; | |||
// speed.clause_pause_factor = 15; | |||
} | |||
else | |||
if(wpm > 374) | |||
@@ -448,11 +420,6 @@ void SetSpeed(int control) | |||
speed.clause_pause_factor = 16; | |||
} | |||
} | |||
#ifdef TEST_SPEED | |||
//if(control==3) | |||
printf("%3d: speedf %d %d %d pause=%d %d wav=%d lenmod=%d %d\n",wpm,speed1,speed2,speed3, speed.pause_factor,speed.clause_pause_factor, speed.wav_factor,speed.lenmod_factor,speed.lenmod2_factor); | |||
#endif | |||
} // end of SetSpeed | |||
#endif // of INCLUDE_SONIC | |||
@@ -986,7 +953,6 @@ if(p->type != phVOWEL) | |||
if(next->ph->mnemonic == ('/'*256+'r')) | |||
{ | |||
next->synthflags &= ~SFLAG_SEQCONTINUE; | |||
// min_drop = 15; | |||
} | |||
} | |||
} |
@@ -817,7 +817,6 @@ ENTER("espeak_Initialize"); | |||
option_phoneme_events = (options & (espeakINITIALIZE_PHONEME_EVENTS | espeakINITIALIZE_PHONEME_IPA)); | |||
VoiceReset(0); | |||
// SetVoiceByName("default"); | |||
for(param=0; param<N_SPEECH_PARAM; param++) | |||
param_stack[0].parameter[param] = saved_parameters[param] = param_defaults[param]; | |||
@@ -827,7 +826,6 @@ ENTER("espeak_Initialize"); | |||
SetParameter(espeakCAPITALS,option_capitals,0); | |||
SetParameter(espeakPUNCTUATION,option_punctuation,0); | |||
SetParameter(espeakWORDGAP,0,0); | |||
// DoVoiceChange(voice); | |||
#ifdef USE_ASYNC | |||
fifo_init(); | |||
@@ -1192,7 +1190,6 @@ ESPEAK_API espeak_ERROR espeak_Cancel(void) | |||
ESPEAK_API int espeak_IsPlaying(void) | |||
{//================================== | |||
// ENTER("espeak_IsPlaying"); | |||
#ifdef USE_ASYNC | |||
if((my_mode == AUDIO_OUTPUT_PLAYBACK) && wave_is_busy(my_audio)) | |||
return(1); |
@@ -83,7 +83,6 @@ float polint(float xa[],float ya[],int n,float x) | |||
w=c[i+1]-d[i]; | |||
if((den=ho-hp) == 0.0) | |||
{ | |||
// fprintf(stderr,"Error in routine 'polint'"); | |||
return(ya[2]); // two input xa are identical | |||
} | |||
den=w/den; |
@@ -40,12 +40,8 @@ extern "C" | |||
#define PLATFORM_POSIX | |||
#define PATHSEP '/' | |||
// USE_PORTAUDIO or USE_PULSEAUDIO are now defined in the makefile | |||
//#define USE_PORTAUDIO | |||
//#define USE_PULSEAUDIO | |||
#define USE_NANOSLEEP | |||
#define __cdecl | |||
//#define ESPEAK_API extern "C" | |||
#ifdef _ESPEAKEDIT | |||
#define LOG_FRAMES // write keyframe info to log-espeakedit |
@@ -192,7 +192,6 @@ espeak_ERROR LoadMbrolaTable(const char *mbrola_voice, const char *phtrans, int | |||
else | |||
SetParameter(espeakVOICETYPE,1,0); | |||
strcpy(mbrola_name,mbrola_voice); | |||
// mbrola_delay = 3800; // improve synchronization of events | |||
mbrola_delay = 1000; // improve synchronization of events | |||
return(EE_OK); | |||
} // end of LoadMbrolaTable | |||
@@ -475,7 +474,6 @@ int MbrolaTranslate(PHONEME_LIST *plist, int n_phonemes, int resume, FILE *f_mbr | |||
// a pause phoneme, which has not been changed by the translation | |||
name = '_'; | |||
len = (p->length * speed.pause_factor)/256; | |||
// if(len == 0) continue; | |||
if(len == 0) | |||
len = 1; | |||
} | |||
@@ -566,7 +564,6 @@ int MbrolaTranslate(PHONEME_LIST *plist, int n_phonemes, int resume, FILE *f_mbr | |||
InterpretPhoneme(NULL, 0, p, &phdata, NULL); | |||
fmtp.fmt_addr = phdata.sound_addr[pd_FMT]; | |||
len = DoSpect2(p->ph, 0, &fmtp, p, -1); | |||
// len = DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,-1); | |||
len = (len * 1000)/samplerate; | |||
if(next->type == phPAUSE) | |||
len += 50; |
@@ -97,7 +97,6 @@ void SynthesizeInit() | |||
syllable_centre = -1; | |||
// initialise next_pause, a dummy phoneme_list entry | |||
// next_pause.ph = phoneme_tab[phonPAUSE]; // this must be done after voice selection | |||
next_pause.type = phPAUSE; | |||
next_pause.newword = 0; | |||
} | |||
@@ -273,13 +272,6 @@ static int DoSample2(int index, int which, int std_length, int control, int leng | |||
if(wav_scale==0) | |||
min_length *= 2; // 16 bit samples | |||
else | |||
{ | |||
// increase consonant amplitude at high speeds, depending on the peak consonant amplitude | |||
// x = ((35 - wav_scale) * speed.loud_consonants); | |||
// if(x < 0) x = 0; | |||
// wav_scale = (wav_scale * (x+256))/256; | |||
} | |||
if(std_length > 0) | |||
{ | |||
@@ -313,11 +305,6 @@ static int DoSample2(int index, int which, int std_length, int control, int leng | |||
// don't let length exceed std_length | |||
length = std_length; | |||
} | |||
else | |||
{ | |||
// reduce the reduction in length | |||
// length = (length + std_length)/2; | |||
} | |||
} | |||
if(length < min_length) | |||
@@ -563,8 +550,6 @@ static void AdjustFormants(frame_t *fr, int target, int min, int max, int f1_adj | |||
{//==================================================================================================================== | |||
int x; | |||
//hf_reduce = 70; // ?? using fixed amount rather than the parameter?? | |||
target = (target * voice->formant_factor)/256; | |||
x = (target - fr->ffreq[2]) / 2; | |||
@@ -663,9 +648,6 @@ static short vcolouring[N_VCOLOUR][5] = { | |||
f1 = ((data2 >> 26) & 0x7); | |||
vcolour = (data2 >> 29); | |||
// fprintf(stderr,"FMT%d %3s %3d-%3d f1=%d f2=%4d %4d %4d f3=%4d %3d\n", | |||
// which,WordToString(other_ph->mnemonic),len,rms,f1,f2,f2_min,f2_max,f3_adj,f3_amp); | |||
if((other_ph != NULL) && (other_ph->mnemonic == '?')) | |||
flags |= 8; | |||
@@ -684,7 +666,6 @@ static short vcolouring[N_VCOLOUR][5] = { | |||
if(voice->klattv[0]) | |||
{ | |||
// fr->klattp[KLATT_AV] = 53; // reduce the amplitude of the start of a vowel
fr->klattp[KLATT_AV] = seq[1].frame->klattp[KLATT_AV] - 4; | |||
} | |||
if(f2 != 0) | |||
@@ -710,7 +691,6 @@ if(voice->klattv[0]) | |||
if(flags & 8) | |||
{ | |||
// set_frame_rms(fr,next_rms - 5); | |||
modn_flags = 0x800 + (VowelCloseness(fr) << 8); | |||
} | |||
} | |||
@@ -1473,7 +1453,6 @@ int Generate(PHONEME_LIST *phoneme_list, int *n_ph, int resume) | |||
if(!next->newword) | |||
{ | |||
if(next->type==phLIQUID) released = 1; | |||
// if(((p->ph->phflags & phPLACE) == phPLACE_blb) && (next->ph->phflags & phSIBILANT)) released = 1; | |||
} | |||
if(released == 0) | |||
p->synthflags |= SFLAG_NEXT_PAUSE; |
@@ -300,8 +300,6 @@ static const char transpose_map_latin[] = { | |||
tr->langopts.roman_suffix = ""; | |||
SetLengthMods(tr,201); | |||
// tr->langopts.length_mods = length_mods_en; | |||
// tr->langopts.length_mods0 = length_mods_en0; | |||
tr->langopts.long_stop = 100; | |||
@@ -352,9 +350,6 @@ static const short pairs_ru[] = { | |||
0x1213, // тс 25076 | |||
0x1220, // яс 14310 | |||
0x7fff}; | |||
//0x040f ог 12976 | |||
//0x1306 ет 12826 | |||
//0x0f0d мо 12688 | |||
@@ -593,9 +588,7 @@ Translator *SelectTranslator(const char *name) | |||
SetupTranslator(tr,stress_lengths_cy,stress_amps_cy); | |||
tr->charset_a0 = charsets[14]; // ISO-8859-14 | |||
// tr->langopts.length_mods0 = tr->langopts.length_mods; // don't lengthen vowels in the last syllable | |||
tr->langopts.stress_rule = STRESSPOSN_2R; | |||
// tr->langopts.intonation_group = 4; | |||
// 'diminished' is an unstressed final syllable | |||
tr->langopts.stress_flags = S_FINAL_DIM_ONLY | S_FINAL_NO_2; | |||
@@ -636,7 +629,6 @@ Translator *SelectTranslator(const char *name) | |||
tr->langopts.param[LOPT_LONG_VOWEL_THRESHOLD] = 175/2; | |||
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_SWAP_TENS | NUM_ALLOW_SPACE | NUM_ORDINAL_DOT | NUM_ROMAN; | |||
// tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_SWAP_TENS | NUM_OMIT_1_HUNDRED | NUM_OMIT_1_THOUSAND | NUM_ALLOW_SPACE | NUM_ORDINAL_DOT | NUM_ROMAN; | |||
SetLetterVowel(tr,'y'); | |||
tr->langopts.param[LOPT_UNPRONOUNCABLE] = 2; // use de_rules for unpronouncable rules | |||
} | |||
@@ -717,7 +709,6 @@ Translator *SelectTranslator(const char *name) | |||
case L('e','o'): | |||
{ | |||
// static const short stress_lengths_eo[8] = {150, 150, 230, 180, 0, 0, 300, 320}; | |||
static const short stress_lengths_eo[8] = {150, 140, 180, 180, 0, 0, 200, 200}; | |||
static const unsigned char stress_amps_eo[] = {16,14, 20,20, 20,22, 22,21 }; | |||
static const wchar_t eo_char_apostrophe[2] = {'l',0}; | |||
@@ -727,11 +718,9 @@ Translator *SelectTranslator(const char *name) | |||
tr->charset_a0 = charsets[3]; // ISO-8859-3 | |||
tr->char_plus_apostrophe = eo_char_apostrophe; | |||
// tr->langopts.word_gap = 1; | |||
tr->langopts.vowel_pause = 2; | |||
tr->langopts.stress_rule = STRESSPOSN_2R; | |||
tr->langopts.stress_flags = S_FINAL_DIM_ONLY | S_FINAL_NO_2; | |||
// tr->langopts.unstressed_wd1 = 3; | |||
tr->langopts.unstressed_wd2 = 2; | |||
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_OMIT_1_HUNDRED | NUM_ALLOW_SPACE | NUM_ROMAN; | |||
@@ -856,7 +845,6 @@ Translator *SelectTranslator(const char *name) | |||
tr->langopts.numbers = NUM_DECIMAL_COMMA + NUM_ALLOW_SPACE; | |||
SetLetterVowel(tr,'y'); | |||
// tr->langopts.max_initial_consonants = 2; // BUT foreign words may have 3 | |||
tr->langopts.spelling_stress = 1; | |||
tr->langopts.intonation_group = 3; // less intonation, don't raise pitch at comma | |||
} | |||
@@ -972,7 +960,6 @@ SetupTranslator(tr,stress_lengths_equal,stress_amps_equal); | |||
case L('h','t'): // Haitian Creole | |||
// memcpy(tr->stress_lengths,stress_lengths_fr,sizeof(tr->stress_lengths)); | |||
tr->langopts.stress_rule = STRESSPOSN_1R; // stress on final syllable | |||
tr->langopts.stress_flags = S_NO_AUTO_2 | S_FINAL_DIM; // don't use secondary stress | |||
tr->langopts.numbers = NUM_SINGLE_STRESS | NUM_OMIT_1_HUNDRED | NUM_NOPAUSE | NUM_ROMAN | NUM_VIGESIMAL | NUM_DFRACTION_4; | |||
@@ -1025,7 +1012,6 @@ SetLengthMods(tr,3); // all equal | |||
SetLetterBits(tr,LETTERGP_C,hy_consonants2); // add 'j' | |||
tr->langopts.max_initial_consonants = 6; | |||
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_ALLOW_SPACE | NUM_OMIT_1_HUNDRED; | |||
// tr->langopts.param[LOPT_UNPRONOUNCABLE] = 1; // disable check for unpronouncable words | |||
} | |||
break; | |||
@@ -1099,7 +1085,6 @@ SetLengthMods(tr,3); // all equal | |||
SetupTranslator(tr,stress_lengths_jbo,NULL); | |||
tr->langopts.stress_rule = STRESSPOSN_2R; | |||
tr->langopts.vowel_pause = 0x20c; // pause before a word which starts with a vowel, or after a word which ends in a consonant | |||
// tr->langopts.word_gap = 1; | |||
tr->punct_within_word = jbo_punct_within_word; | |||
tr->langopts.param[LOPT_CAPS_IN_WORD] = 2; // capitals indicate stressed syllables | |||
SetLetterVowel(tr,'y'); | |||
@@ -1123,7 +1108,6 @@ SetLengthMods(tr,3); // all equal | |||
tr->langopts.stress_rule = STRESSPOSN_1L; | |||
tr->langopts.stress_flags = S_FINAL_NO_2; | |||
tr->letter_bits_offset = OFFSET_GEORGIAN; | |||
// tr->langopts.param[LOPT_UNPRONOUNCABLE] = 1; // disable check for unpronouncable words | |||
tr->langopts.max_initial_consonants = 7; | |||
tr->langopts.numbers = NUM_VIGESIMAL | NUM_AND_UNITS | NUM_OMIT_1_HUNDRED |NUM_OMIT_1_THOUSAND | NUM_DFRACTION_5 | NUM_ROMAN; | |||
@@ -1635,7 +1619,6 @@ SetLengthMods(tr,3); // all equal | |||
tr->langopts.stress_rule = STRESSPOSN_1L; | |||
tr->langopts.word_gap = 0x21; // length of a final vowel is less dependent on the next consonant, don't merge consonant with next word | |||
// tr->langopts.vowel_pause = 4; | |||
tr->letter_groups[0] = tr->letter_groups[7] = vowels_vi; | |||
tr->langopts.tone_language = 1; // Tone language, use CalcPitches_Tone() rather than CalcPitches() | |||
tr->langopts.unstressed_wd1 = 2; | |||
@@ -1729,115 +1712,3 @@ static void Translator_Russian(Translator *tr) | |||
tr->langopts.testing = 2; | |||
} // end of Translator_Russian | |||
/* | |||
typedef struct { | |||
int flags; | |||
unsigned char stress; // stress level of this vowel | |||
unsigned char stress_highest; // the highest stress level of a vowel in this word | |||
unsigned char n_vowels; // number of vowels in the word | |||
unsigned char vowel_this; // syllable number of this vowel (counting from 1) | |||
unsigned char vowel_stressed; // syllable number of the highest stressed vowel | |||
} CHANGEPH; | |||
*/ | |||
#ifdef RUSSIAN2 | |||
// This is now done in the phoneme data, ph_russian | |||
int ChangePhonemes_ru(Translator *tr, PHONEME_LIST2 *phlist, int n_ph, int index, PHONEME_TAB *ph, CHANGEPH *ch) | |||
{//============================================================================================================= | |||
// Called for each phoneme in the phoneme list, to allow a language to make changes | |||
// ph The current phoneme | |||
int variant; | |||
int vowelix; | |||
PHONEME_TAB *prev, *next; | |||
if(ch->flags & 8) | |||
return(0); // full phoneme translation has already been given | |||
// Russian vowel softening and reduction rules | |||
if(ph->type == phVOWEL) | |||
{ | |||
int prestressed = ch->vowel_stressed==ch->vowel_this+1; // the next vowel after this has the main stress | |||
#define N_VOWELS_RU 11 | |||
static unsigned int vowels_ru[N_VOWELS_RU] = {'a','V','O','I',PH('I','#'),PH('E','#'),PH('E','2'), | |||
PH('V','#'),PH('I','3'),PH('I','2'),PH('E','3')}; | |||
static unsigned int vowel_replace[N_VOWELS_RU][6] = { | |||
// stressed, soft, soft-stressed, j+stressed, j+soft, j+soft-stressed | |||
/*0*/ {'A', 'I', PH('j','a'), 'a', 'a', 'a'}, // a Uses 3,4,5 columns. | |||
/*1*/ {'A', 'V', PH('j','a'), 'a', 'V', 'a'}, // V Uses 3,4,5 columns. | |||
/*2*/ {'o', '8', '8', 'o', '8', '8'}, // O | |||
/*3*/ {'i', 'I', 'i', 'a', 'I', 'a'}, // I Uses 3,4,5 columns. | |||
/*4*/ {'i', PH('I','#'), 'i', 'i', PH('I','#'), 'i'}, // I# | |||
/*5*/ {'E', PH('E','#'), 'E', 'e', PH('E','#'), 'e'}, // E# | |||
/*6*/ {'E', PH('E','2'), 'E', 'e', PH('E','2'), 'e'}, // E2 Uses 3,4,5 columns. | |||
/*7*/ {PH('j','a'), 'V', PH('j','a'), 'A', 'V', 'A'}, // V# | |||
/*8*/ {PH('j','a'), 'I', PH('j','a'), 'e', 'I', 'e'}, // I3 Uses 3,4,5 columns. | |||
/*9*/ {'e', 'I', 'e', 'e', 'I', 'e'}, // I2 | |||
/*10*/ {'e', PH('E', '2'), 'e', 'e', PH('E','2'), 'e'} // E3 | |||
}; | |||
prev = phoneme_tab[phlist[index-1].phcode]; | |||
next = phoneme_tab[phlist[index+1].phcode]; | |||
// lookup the vowel name to get an index into the vowel_replace[] table | |||
for(vowelix=0; vowelix<N_VOWELS_RU; vowelix++) | |||
{ | |||
if(vowels_ru[vowelix] == ph->mnemonic) | |||
break; | |||
} | |||
if(vowelix == N_VOWELS_RU) | |||
return(0); | |||
if(prestressed) | |||
{ | |||
if((vowelix==6)&&(prev->mnemonic=='j')) | |||
vowelix=8; | |||
if(vowelix==1) | |||
vowelix=0; | |||
if(vowelix==4) | |||
vowelix=3; | |||
if(vowelix==6) | |||
vowelix=5; | |||
if(vowelix==7) | |||
vowelix=8; | |||
if(vowelix==10) | |||
vowelix=9; | |||
} | |||
// do we need a variant of this vowel, depending on the stress and adjacent phonemes ? | |||
variant = -1; | |||
int stressed = ch->flags & 2; | |||
int soft=prev->phflags & phPALATAL; | |||
if (soft && stressed) | |||
variant = 2; else | |||
if (stressed) | |||
variant = 0; else | |||
if (soft) | |||
variant = 1; | |||
if(variant >= 0) | |||
{ | |||
if(prev->mnemonic == 'j') | |||
variant += 3; | |||
phlist[index].phcode = PhonemeCode(vowel_replace[vowelix][variant]); | |||
} | |||
else | |||
{ | |||
phlist[index].phcode = PhonemeCode(vowels_ru[vowelix]); | |||
} | |||
} | |||
return(0); | |||
} | |||
#endif | |||
@@ -121,10 +121,6 @@ static const unsigned short brackets[] = { | |||
// other characters which break a word, but don't produce a pause | |||
static const unsigned short breaks[] = {'_', 0}; | |||
// treat these characters as spaces, in addition to iswspace() | |||
// static const wchar_t chars_space[] = {0x2500,0x2501,0}; // box drawing horiz | |||
// Translate character codes 0xA0 to 0xFF into their unicode values | |||
// ISO_8859_1 is set as default | |||
static const unsigned short ISO_8859_1[0x60] = { | |||
@@ -1081,10 +1077,6 @@ int TranslateWord(Translator *tr, char *word_start, int next_pause, WORD_TAB *wt | |||
{ | |||
if(word_length > 1) | |||
return(FLAG_SPELLWORD); // a mixture of languages, retranslate as individual letters, separated by spaces | |||
if(phonemes[0] == phonSWITCH) | |||
{ | |||
// problem with espeak -vbg "b.c.d.e.f" | |||
} | |||
return(0); | |||
} | |||
strcpy(word_phonemes, phonemes); | |||
@@ -2977,7 +2969,6 @@ void *TranslateClause(Translator *tr, FILE *f_text, const void *vp_input, int *t | |||
c = ' '; // lower case followed by upper case, treat as new word | |||
space_inserted = 1; | |||
prev_in_save = c; | |||
// next_word_flags |= FLAG_NOSPACE; // problem: prevents FLAG_HAS_DOT being set | |||
} | |||
} | |||
else if((c != ' ') && iswupper2(prev_in) && iswlower2(next_in)) |
@@ -50,11 +50,8 @@ MNEM_TAB genders [] = { | |||
}; | |||
int tone_points[12] = {600,170, 1200,135, 2000,110, 3000,110, -1,0}; | |||
//int tone_points[12] = {250,200, 400,170, 600,170, 1200,135, 2000,110, -1,0}; | |||
// limit the rate of change for each formant number | |||
//static int formant_rate_22050[9] = {50, 104, 165, 230, 220, 220, 220, 220, 220}; // values for 22kHz sample rate | |||
//static int formant_rate_22050[9] = {240, 180, 180, 180, 180, 180, 180, 180, 180}; // values for 22kHz sample rate | |||
static int formant_rate_22050[9] = {240, 170, 170, 170, 170, 170, 170, 170, 170}; // values for 22kHz sample rate | |||
int formant_rate[9]; // values adjusted for actual sample rate | |||
@@ -166,7 +163,6 @@ static MNEM_TAB keyword_tab[] = { | |||
// these just set a value in langopts.param[] | |||
{"l_dieresis", 0x100+LOPT_DIERESES}, | |||
// {"l_lengthen", 0x100+LOPT_IT_LENGTHEN}, | |||
{"l_prefix", 0x100+LOPT_PREFIXES}, | |||
{"l_regressive_v", 0x100+LOPT_REGRESSIVE_VOICING}, | |||
{"l_unpronouncable", 0x100+LOPT_UNPRONOUNCABLE}, | |||
@@ -407,8 +403,6 @@ void VoiceReset(int tone_only) | |||
int pk; | |||
static unsigned char default_heights[N_PEAKS] = {130,128,120,116,100,100,128,128,128}; // changed for v.1.47 | |||
static unsigned char default_widths[N_PEAKS] = {140,128,128,160,171,171,128,128,128}; | |||
// static unsigned char default_heights[N_PEAKS] = {128,128,120,120,110,110,128,128,128}; // previous version | |||
// static unsigned char default_widths[N_PEAKS] = {128,128,128,160,171,171,128,128,128}; | |||
static int breath_widths[N_PEAKS] = {0,200,200,400,400,400,600,600,600}; | |||
@@ -416,10 +410,6 @@ void VoiceReset(int tone_only) | |||
voice->pitch_base = 0x47000; | |||
voice->pitch_range = 4104; | |||
// default is: pitch 80,117 | |||
// voice->pitch_base = 0x47000; | |||
// voice->pitch_range = 3996; | |||
voice->formant_factor = 256; | |||
voice->speed_percent = 100; | |||
@@ -461,7 +451,6 @@ void VoiceReset(int tone_only) | |||
// This table provides the opportunity for tone control. | |||
// Adjustment of harmonic amplitudes, steps of 8Hz | |||
// value of 128 means no change | |||
// memset(voice->tone_adjust,128,sizeof(voice->tone_adjust)); | |||
SetToneAdjust(voice,tone_points); | |||
// default values of speed factors |
@@ -42,8 +42,6 @@ | |||
#include "wave.h" | |||
#include "debug.h" | |||
//<Definitions | |||
#ifdef NEED_STRUCT_TIMESPEC | |||
#define HAVE_STRUCT_TIMESPEC 1 | |||
struct timespec { | |||
@@ -215,7 +213,6 @@ static t_wave_callback* my_callback_is_output_enabled=NULL; | |||
#define MAX_SAMPLE_RATE 22050 | |||
#define FRAMES_PER_BUFFER 512 | |||
#define BUFFER_LENGTH (MAX_SAMPLE_RATE*2*sizeof(uint16_t)) | |||
//#define THRESHOLD (BUFFER_LENGTH/5) | |||
static char myBuffer[BUFFER_LENGTH]; | |||
static char* myRead=NULL; | |||
static char* myWrite=NULL; | |||
@@ -245,9 +242,6 @@ static PaError pa_init_err=0; | |||
static uint32_t myReadPosition = 0; // in ms | |||
static uint32_t myWritePosition = 0; | |||
//> | |||
//<init_buffer, get_used_mem | |||
static void init_buffer() | |||
{ | |||
myWrite = myBuffer; | |||
@@ -281,9 +275,6 @@ static unsigned int get_used_mem() | |||
return used; | |||
} | |||
//> | |||
//<start stream | |||
static void start_stream() | |||
{ | |||
PaError err; | |||
@@ -309,9 +300,6 @@ static void start_stream() | |||
#endif | |||
} | |||
//> | |||
//<pa_callback | |||
/* This routine will be called by the PortAudio engine when audio is needed. | |||
** It may be called at interrupt level on some machines so don't do anything
** that could mess up the system like calling malloc() or free(). | |||
@@ -352,7 +340,6 @@ static int pa_callback(void *inputBuffer, void *outputBuffer, | |||
} | |||
char* p = (char*)outputBuffer + aUsedMem; | |||
memset(p, 0, n - aUsedMem); | |||
// myReadPosition += aUsedMem/(out_channels*sizeof(uint16_t)); | |||
myRead = aWrite; | |||
} | |||
} | |||
@@ -402,30 +389,12 @@ static int pa_callback(void *inputBuffer, void *outputBuffer, | |||
size_t aUsedMem = aTopMem + aRest; | |||
char* p = (char*)outputBuffer + aUsedMem; | |||
memset(p, 0, n - aUsedMem); | |||
// myReadPosition += aUsedMem/(out_channels*sizeof(uint16_t)); | |||
myRead = aWrite; | |||
} | |||
} | |||
SHOW("pa_callback > myRead=%x\n",(int)myRead); | |||
// #if USE_PORTAUDIO == 18 | |||
// if(aBufferEmpty) | |||
// { | |||
// static int end_timer = 0; | |||
// if(end_timer == 0) | |||
// end_timer = 4; | |||
// if(end_timer > 0) | |||
// { | |||
// end_timer--; | |||
// if(end_timer == 0) | |||
// return(1); | |||
// } | |||
// } | |||
// return(0); | |||
// #else | |||
#ifdef ARCH_BIG | |||
{ | |||
// BIG-ENDIAN, swap the order of bytes in each sound sample in the portaudio buffer | |||
@@ -444,14 +413,9 @@ static int pa_callback(void *inputBuffer, void *outputBuffer, | |||
} | |||
#endif | |||
return(aResult); | |||
//#endif | |||
} // end of WaveCallBack | |||
//> | |||
void wave_flush(void* theHandler) | |||
{ | |||
@@ -459,16 +423,10 @@ void wave_flush(void* theHandler) | |||
if (my_stream_could_start) | |||
{ | |||
// #define buf 1024 | |||
// static char a_buffer[buf*2]; | |||
// memset(a_buffer,0,buf*2); | |||
// wave_write(theHandler, a_buffer, buf*2); | |||
start_stream(); | |||
} | |||
} | |||
//<wave_open_sound | |||
static int wave_open_sound() | |||
{ | |||
ENTER("wave_open_sound"); | |||
@@ -492,7 +450,6 @@ static int wave_open_sound() | |||
out_channels = 1; | |||
#if USE_PORTAUDIO == 18 | |||
// err = Pa_OpenDefaultStream(&pa_stream,0,1,paInt16,wave_samplerate,FRAMES_PER_BUFFER,N_WAV_BUF,pa_callback,(void *)userdata); | |||
PaDeviceID playbackDevice = Pa_GetDefaultOutputDeviceID(); | |||
@@ -509,7 +466,6 @@ static int wave_open_sound() | |||
NULL, | |||
/* general parameters */ | |||
wave_samplerate, FRAMES_PER_BUFFER, 0, | |||
//paClipOff | paDitherOff, | |||
paNoFlag, | |||
pa_callback, (void *)userdata); | |||
@@ -520,7 +476,6 @@ static int wave_open_sound() | |||
SHOW_TIME("wave_open_sound > try stereo"); | |||
// failed to open with mono, try stereo | |||
out_channels = 2; | |||
// myOutputParameters.channelCount = out_channels; | |||
PaError err = Pa_OpenStream( &pa_stream, | |||
/* capture parameters */ | |||
paNoDevice, | |||
@@ -534,13 +489,8 @@ static int wave_open_sound() | |||
NULL, | |||
/* general parameters */ | |||
wave_samplerate, FRAMES_PER_BUFFER, 0, | |||
//paClipOff | paDitherOff, | |||
paNoFlag, | |||
pa_callback, (void *)userdata); | |||
// err = Pa_OpenDefaultStream(&pa_stream,0,2,paInt16, | |||
// wave_samplerate, | |||
// FRAMES_PER_BUFFER, | |||
// N_WAV_BUF,pa_callback,(void *)userdata); | |||
SHOW("wave_open_sound > Pa_OpenDefaultStream(2): err=%d (%s)\n",err, Pa_GetErrorText(err)); | |||
err=0; // avoid warning | |||
} | |||
@@ -555,7 +505,6 @@ static int wave_open_sound() | |||
wave_samplerate, | |||
framesPerBuffer, | |||
paNoFlag, | |||
// paClipOff | paDitherOff, | |||
pa_callback, | |||
(void *)userdata); | |||
if ((err!=paNoError) | |||
@@ -590,8 +539,6 @@ static int wave_open_sound() | |||
// paClipOff | paDitherOff, | |||
pa_callback, | |||
(void *)userdata); | |||
// err = Pa_OpenDefaultStream(&pa_stream,0,2,paInt16,(double)wave_samplerate,FRAMES_PER_BUFFER,pa_callback,(void *)userdata); | |||
} | |||
mInCallbackFinishedState = false; | |||
#endif | |||
@@ -602,15 +549,10 @@ static int wave_open_sound() | |||
return (err != paNoError); | |||
} | |||
//> | |||
//<select_device | |||
#if (USE_PORTAUDIO == 19) | |||
static void update_output_parameters(int selectedDevice, const PaDeviceInfo *deviceInfo) | |||
{ | |||
// const PaDeviceInfo *pdi = Pa_GetDeviceInfo(i); | |||
myOutputParameters.device = selectedDevice; | |||
// myOutputParameters.channelCount = pdi->maxOutputChannels; | |||
myOutputParameters.channelCount = 1; | |||
myOutputParameters.sampleFormat = paInt16; | |||
@@ -620,8 +562,6 @@ static void update_output_parameters(int selectedDevice, const PaDeviceInfo *dev | |||
if (deviceInfo) | |||
{ | |||
double aLatency = deviceInfo->defaultLowOutputLatency; | |||
// double aCoeff = round(0.100 / aLatency); | |||
// myOutputParameters.suggestedLatency = aCoeff * aLatency; // to avoid glitches ? | |||
myOutputParameters.suggestedLatency = aLatency; // for faster response ? | |||
SHOW("Device=%d, myOutputParameters.suggestedLatency=%f, aCoeff=%f\n", | |||
selectedDevice, | |||
@@ -635,7 +575,6 @@ static void update_output_parameters(int selectedDevice, const PaDeviceInfo *dev | |||
selectedDevice, | |||
myOutputParameters.suggestedLatency); | |||
} | |||
//pdi->defaultLowOutputLatency; | |||
myOutputParameters.hostApiSpecificStreamInfo = NULL; | |||
} | |||
@@ -727,49 +666,11 @@ static void select_device(const char* the_api) | |||
#endif | |||
} | |||
//> | |||
// int wave_Close(void* theHandler) | |||
// { | |||
// SHOW_TIME("WaveCloseSound"); | |||
// // PaError active; | |||
// // check whether speaking has finished, and close the stream | |||
// if(pa_stream != NULL) | |||
// { | |||
// Pa_CloseStream(pa_stream); | |||
// pa_stream = NULL; | |||
// init_buffer(); | |||
// // #if USE_PORTAUDIO == 18 | |||
// // active = Pa_StreamActive(pa_stream); | |||
// // #else | |||
// // active = Pa_IsStreamActive(pa_stream); | |||
// // #endif | |||
// // if(active == 0) | |||
// // { | |||
// // SHOW_TIME("WaveCloseSound > ok, not active"); | |||
// // Pa_CloseStream(pa_stream); | |||
// // pa_stream = NULL; | |||
// // return(1); | |||
// // } | |||
// } | |||
// return(0); | |||
// } | |||
//<wave_set_callback_is_output_enabled | |||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | |||
{ | |||
my_callback_is_output_enabled = cb; | |||
} | |||
//> | |||
//<wave_init | |||
// TBD: the arg could be "alsa", "oss",... | |||
int wave_init(int srate) | |||
{ | |||
ENTER("wave_init"); | |||
@@ -790,16 +691,11 @@ int wave_init(int srate) | |||
return err == paNoError; | |||
} | |||
//> | |||
//<wave_open | |||
void* wave_open(const char* the_api) | |||
{ | |||
ENTER("wave_open"); | |||
static int once=0; | |||
// TBD: the_api (e.g. "alsa") is not used at the moment | |||
// select_device is called once | |||
if (!once) | |||
{ | |||
select_device("alsa"); | |||
@@ -808,10 +704,6 @@ void* wave_open(const char* the_api) | |||
return((void*)1); | |||
} | |||
//> | |||
//<copyBuffer | |||
static size_t copyBuffer(char* dest, char* src, const size_t theSizeInBytes) | |||
{ | |||
size_t bytes_written = 0; | |||
@@ -847,9 +739,6 @@ static size_t copyBuffer(char* dest, char* src, const size_t theSizeInBytes) | |||
return bytes_written; | |||
} | |||
//> | |||
//<wave_write | |||
size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | |||
{ | |||
ENTER("wave_write"); | |||
@@ -915,7 +804,6 @@ size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSiz | |||
break; | |||
} // end if (aTotalFreeMem >= bytes_to_write) | |||
//SHOW_TIME("wave_write > wait"); | |||
SHOW("wave_write > wait: aTotalFreeMem=%d\n", aTotalFreeMem); | |||
SHOW("wave_write > aRead=%x, myWrite=%x\n", (int)aRead, (int)myWrite); | |||
usleep(10000); | |||
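For context on the loop above: wave_write is the producer side of a single-producer/single-consumer ring buffer and simply sleeps in 10 ms steps until pa_callback has drained enough data to make room. The free-space test it keeps re-evaluating boils down to the sketch below; ring_free_bytes, read_ptr and write_ptr are illustrative names, not the variables used in this file.

#include <stddef.h>

// Free space between the writer and the reader of a circular buffer.
// One byte is kept unused so that read_ptr == write_ptr always means "empty".
static size_t ring_free_bytes(size_t buffer_length,
                              const char *read_ptr, const char *write_ptr)
{
    if (read_ptr > write_ptr)
        return (size_t)(read_ptr - write_ptr) - 1;
    return buffer_length - (size_t)(write_ptr - read_ptr) - 1;
}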
@@ -973,9 +861,6 @@ size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSiz | |||
return bytes_written; | |||
} | |||
//> | |||
//<wave_close | |||
int wave_close(void* theHandler) | |||
{ | |||
SHOW_TIME("wave_close > ENTER"); | |||
@@ -1083,42 +968,6 @@ int wave_close(void* theHandler) | |||
return 0; | |||
} | |||
// int wave_close(void* theHandler) | |||
// { | |||
// ENTER("wave_close"); | |||
// if(pa_stream != NULL) | |||
// { | |||
// PaError err = Pa_AbortStream(pa_stream); | |||
// SHOW_TIME("wave_close > Pa_AbortStream (end)"); | |||
// SHOW("wave_close Pa_AbortStream > err=%d\n",err); | |||
// while(1) | |||
// { | |||
// PaError active; | |||
// #if USE_PORTAUDIO == 18 | |||
// active = Pa_StreamActive(pa_stream); | |||
// #else | |||
// active = Pa_IsStreamActive(pa_stream); | |||
// #endif | |||
// if (active != 1) | |||
// { | |||
// break; | |||
// } | |||
// SHOW("wave_close > active=%d\n",err); | |||
// usleep(10000); /* sleep until playback has finished */ | |||
// } | |||
// err = Pa_CloseStream( pa_stream ); | |||
// SHOW_TIME("wave_close > Pa_CloseStream (end)"); | |||
// SHOW("wave_close Pa_CloseStream > err=%d\n",err); | |||
// pa_stream = NULL; | |||
// init_buffer(); | |||
// } | |||
// return 0; | |||
// } | |||
//> | |||
//<wave_is_busy | |||
int wave_is_busy(void* theHandler) | |||
{ | |||
PaError active=0; | |||
@@ -1142,9 +991,6 @@ int wave_is_busy(void* theHandler) | |||
return (active==1); | |||
} | |||
//> | |||
//<wave_terminate | |||
void wave_terminate() | |||
{ | |||
ENTER("wave_terminate"); | |||
@@ -1153,9 +999,6 @@ void wave_terminate() | |||
} | |||
//> | |||
//<wave_get_read_position, wave_get_write_position, wave_get_remaining_time | |||
uint32_t wave_get_read_position(void* theHandler) | |||
{ | |||
SHOW("wave_get_read_position > myReadPosition=%u\n", myReadPosition); | |||
@@ -1196,9 +1039,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
return 0; | |||
} | |||
//> | |||
//<wave_test_get_write_buffer | |||
void *wave_test_get_write_buffer() | |||
{ | |||
return myWrite; | |||
@@ -1206,7 +1046,6 @@ void *wave_test_get_write_buffer() | |||
#else | |||
// notdef USE_PORTAUDIO | |||
int wave_init(int srate) {return 1;} | |||
@@ -1231,9 +1070,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
#endif // of USE_PORTAUDIO | |||
//> | |||
//<clock_gettime2, add_time_in_ms | |||
void clock_gettime2(struct timespec *ts) | |||
{ | |||
struct timeval tv; | |||
@@ -1267,5 +1103,3 @@ void add_time_in_ms(struct timespec *ts, int time_in_ms) | |||
#endif // USE_ASYNC | |||
//> |
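As a reference point for the wave_open_sound() logic above (open mono paInt16 output, retry in stereo, then start the stream from wave_flush), here is a minimal sketch of the same idea against the plain PortAudio v19 API. It deliberately uses Pa_OpenDefaultStream instead of the explicit PaStreamParameters and device selection done in this file; open_default_playback, my_callback and my_rate are assumed names, not part of the source.

#include <string.h>
#include <portaudio.h>

// Callback invoked by PortAudio whenever it needs more audio; the real
// pa_callback above copies from espeak's ring buffer instead of writing silence.
static int my_callback(const void *input, void *output, unsigned long frameCount,
                       const PaStreamCallbackTimeInfo *timeInfo,
                       PaStreamCallbackFlags statusFlags, void *userData)
{
    (void)input; (void)timeInfo; (void)statusFlags; (void)userData;
    memset(output, 0, frameCount * sizeof(short));   // mono paInt16 frames
    return paContinue;
}

static int open_default_playback(PaStream **stream, double my_rate)
{
    if (Pa_Initialize() != paNoError)
        return -1;
    // 0 input channels, 1 output channel, 16-bit samples.
    if (Pa_OpenDefaultStream(stream, 0, 1, paInt16, my_rate,
                             paFramesPerBufferUnspecified,
                             my_callback, NULL) != paNoError)
        return -1;
    return (Pa_StartStream(*stream) == paNoError) ? 0 : -1;
}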
@@ -13,7 +13,6 @@ extern "C" | |||
extern int option_device_number; | |||
extern int wave_init(int samplerate); | |||
// TBD: the arg could be "alsa", "oss",... | |||
extern void* wave_open(const char* the_api); | |||
extern size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize); |
@@ -47,16 +47,9 @@ | |||
#include "wave.h" | |||
#include "debug.h" | |||
//<Definitions | |||
enum {ONE_BILLION=1000000000}; | |||
enum { | |||
// /* 100ms. | |||
// If a greater value is set (several seconds), | |||
// please update _pulse_timeout_start accordingly */ | |||
// PULSE_TIMEOUT_IN_USEC = 100000, | |||
/* return value */ | |||
PULSE_OK = 0, | |||
PULSE_ERROR = -1, | |||
@@ -144,26 +137,6 @@ do { \ | |||
if (!connected){ SHOW("CHECK_CONNECTED_NO_RETVAL: !pulse_connected\n", ""); return; } \ | |||
} while (0); | |||
//> | |||
// static void display_timing_info(const pa_timing_info* the_time) | |||
// { | |||
// const struct timeval *tv=&(the_time->timestamp); | |||
// SHOW_TIME("ti>"); | |||
// SHOW("ti> timestamp=%03d.%03dms\n",(int)(tv->tv_sec%1000), (int)(tv->tv_usec/1000)); | |||
// SHOW("ti> synchronized_clocks=%d\n",the_time->synchronized_clocks); | |||
// SHOW("ti> sink_usec=%ld\n",the_time->sink_usec); | |||
// SHOW("ti> source_usec=%ld\n",the_time->source_usec); | |||
// SHOW("ti> transport=%ld\n",the_time->transport_usec); | |||
// SHOW("ti> playing=%d\n",the_time->playing); | |||
// SHOW("ti> write_index_corrupt=%d\n",the_time->write_index_corrupt); | |||
// SHOW("ti> write_index=0x%lx\n",the_time->write_index); | |||
// SHOW("ti> read_index_corrupt=%d\n",the_time->read_index_corrupt); | |||
// SHOW("ti> read_index=0x%lx\n",the_time->read_index); | |||
// } | |||
static void subscribe_cb(struct pa_context *c, enum pa_subscription_event_type t, uint32_t index, void *userdata) { | |||
ENTER(__FUNCTION__); | |||
@@ -241,7 +214,6 @@ static void stream_request_cb(pa_stream *s, size_t length, void *userdata) { | |||
} | |||
static void stream_latency_update_cb(pa_stream *s, void *userdata) { | |||
// ENTER(__FUNCTION__); | |||
assert(s); | |||
pa_threaded_mainloop_signal(mainloop, 0); | |||
@@ -330,50 +302,12 @@ static int pulse_playing(const pa_timing_info *the_timing_info) { | |||
r = i->playing; | |||
memcpy((void*)the_timing_info, (void*)i, sizeof(pa_timing_info)); | |||
// display_timing_info(i); | |||
fail: | |||
pa_threaded_mainloop_unlock(mainloop); | |||
return r; | |||
} | |||
// static void pulse_flush(int time) { | |||
// ENTER(__FUNCTION__); | |||
// pa_operation *o = NULL; | |||
// int success = 0; | |||
// CHECK_CONNECTED(); | |||
// pa_threaded_mainloop_lock(mainloop); | |||
// CHECK_DEAD_GOTO(fail, 1); | |||
// if (!(o = pa_stream_flush(stream, stream_success_cb, &success))) { | |||
// SHOW("pa_stream_flush() failed: %s", pa_strerror(pa_context_errno(context))); | |||
// goto fail; | |||
// } | |||
// while (pa_operation_get_state(o) != PA_OPERATION_DONE) { | |||
// CHECK_DEAD_GOTO(fail, 1); | |||
// pa_threaded_mainloop_wait(mainloop); | |||
// } | |||
// if (!success) | |||
// SHOW("pa_stream_flush() failed: %s", pa_strerror(pa_context_errno(context))); | |||
// written = (uint64_t) (((double) time * pa_bytes_per_second(pa_stream_get_sample_spec(stream))) / 1000); | |||
// just_flushed = 1; | |||
// time_offset_msec = time; | |||
// fail: | |||
// if (o) | |||
// pa_operation_unref(o); | |||
// pa_threaded_mainloop_unlock(mainloop); | |||
// } | |||
static void pulse_write(void* ptr, int length) { | |||
ENTER(__FUNCTION__); | |||
@@ -610,8 +544,6 @@ unlock_and_fail: | |||
fail: | |||
// pulse_close(); | |||
if (ret == PULSE_NO_CONNECTION) { | |||
if (context) { | |||
SHOW_TIME("pa_context_disconnect (call)"); | |||
@@ -639,29 +571,13 @@ fail: | |||
void wave_flush(void* theHandler) | |||
{ | |||
ENTER("wave_flush"); | |||
// if (my_stream_could_start) | |||
// { | |||
// // #define buf 1024 | |||
// // static char a_buffer[buf*2]; | |||
// // memset(a_buffer,0,buf*2); | |||
// // wave_write(theHandler, a_buffer, buf*2); | |||
// start_stream(); | |||
// } | |||
} | |||
//<wave_set_callback_is_output_enabled | |||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | |||
{ | |||
my_callback_is_output_enabled = cb; | |||
} | |||
//> | |||
//<wave_init | |||
int wave_init(int srate) | |||
{ | |||
ENTER("wave_init"); | |||
@@ -672,18 +588,12 @@ int wave_init(int srate) | |||
return pulse_open() == PULSE_OK; | |||
} | |||
//> | |||
//<wave_open | |||
void* wave_open(const char* the_api) | |||
{ | |||
ENTER("wave_open"); | |||
return((void*)1); | |||
} | |||
//> | |||
//<wave_write | |||
size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | |||
{ | |||
ENTER("wave_write"); | |||
@@ -741,9 +651,6 @@ size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSiz | |||
return theSize; | |||
} | |||
//> | |||
//<wave_close | |||
int wave_close(void* theHandler) | |||
{ | |||
SHOW_TIME("wave_close > ENTER"); | |||
@@ -775,9 +682,6 @@ int wave_close(void* theHandler) | |||
return PULSE_OK; | |||
} | |||
//> | |||
//<wave_is_busy | |||
int wave_is_busy(void* theHandler) | |||
{ | |||
SHOW_TIME("wave_is_busy"); | |||
@@ -788,15 +692,10 @@ int wave_is_busy(void* theHandler) | |||
return active; | |||
} | |||
//> | |||
//<wave_terminate | |||
void wave_terminate() | |||
{ | |||
ENTER("wave_terminate"); | |||
// Pa_Terminate(); | |||
int a_status; | |||
pthread_mutex_t* a_mutex = NULL; | |||
a_mutex = &pulse_mutex; | |||
@@ -809,9 +708,6 @@ void wave_terminate() | |||
pthread_mutex_destroy(a_mutex); | |||
} | |||
//> | |||
//<wave_get_read_position, wave_get_write_position, wave_get_remaining_time | |||
uint32_t wave_get_read_position(void* theHandler) | |||
{ | |||
pa_timing_info a_timing_info; | |||
@@ -859,9 +755,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
return 0; | |||
} | |||
//> | |||
//<wave_test_get_write_buffer | |||
void *wave_test_get_write_buffer() | |||
{ | |||
return NULL; | |||
@@ -869,7 +762,6 @@ void *wave_test_get_write_buffer() | |||
#else | |||
// notdef USE_PULSEAUDIO | |||
int wave_init(int srate) {return 1;} | |||
@@ -895,8 +787,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
#endif // of USE_PULSEAUDIO | |||
#ifndef USE_PORTAUDIO | |||
//> | |||
//<clock_gettime2, add_time_in_ms | |||
void clock_gettime2(struct timespec *ts) | |||
{ | |||
@@ -932,5 +822,3 @@ void add_time_in_ms(struct timespec *ts, int time_in_ms) | |||
#endif // USE_ASYNC | |||
//> |
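wave_pulse.c drives PulseAudio through the threaded-mainloop API (pa_threaded_mainloop, pa_stream_write, the timing queries above) so that wave_write can return while playback continues in the background. Purely as a contrast, the same "push 16-bit mono samples to the server" job looks like this with the blocking pa_simple API; play_block and the "espeak-sketch" strings are placeholders and this is not how the file is implemented.

#include <stddef.h>
#include <pulse/simple.h>

// Blocking PulseAudio playback of one mono 16-bit buffer; illustrative only.
static int play_block(const short *samples, size_t n_samples, unsigned srate)
{
    pa_sample_spec ss;
    ss.format = PA_SAMPLE_S16NE;      // native-endian, matching wave_write's input
    ss.rate = srate;
    ss.channels = 1;

    int err = 0;
    pa_simple *s = pa_simple_new(NULL, "espeak-sketch", PA_STREAM_PLAYBACK, NULL,
                                 "playback", &ss, NULL, NULL, &err);
    if (s == NULL)
        return -1;
    if (pa_simple_write(s, samples, n_samples * sizeof(short), &err) < 0 ||
        pa_simple_drain(s, &err) < 0) {   // wait until everything has been played
        pa_simple_free(s);
        return -1;
    }
    pa_simple_free(s);
    return 0;
}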
@@ -64,7 +64,7 @@ static uint32_t total_samples_skipped; | |||
static uint32_t last_play_position=0; | |||
static uint32_t wave_samplerate; | |||
//> | |||
// wave_init | |||
// | |||
// DESCRIPTION: | |||
@@ -76,8 +76,6 @@ static uint32_t wave_samplerate; | |||
// sun_audio_fd: modified to hold the file descriptor of the opened | |||
// audio device. | |||
// | |||
//<wave_init | |||
int wave_init(int srate) { | |||
ENTER("wave_init"); | |||
@@ -122,7 +120,6 @@ int wave_init(int srate) { | |||
return(1); | |||
} | |||
//> | |||
// wave_open | |||
// | |||
// DESCRIPTION: | |||
@@ -145,15 +142,12 @@ int wave_init(int srate) { | |||
// sun_audio_fd opened in wave_init, which is passed in as theHandler | |||
// parameter in all other methods | |||
// | |||
//<wave_open | |||
void* wave_open(const char* the_api) | |||
{ | |||
ENTER("wave_open"); | |||
return((void*) sun_audio_fd); | |||
} | |||
//> | |||
// wave_write | |||
// | |||
// DESCRIPTION: | |||
@@ -180,8 +174,6 @@ void* wave_open(const char* the_api) | |||
// | |||
// the number of bytes (not 16-bit samples) sent | |||
// | |||
//<wave_write | |||
size_t wave_write(void* theHandler, | |||
char* theMono16BitsWaveBuffer, | |||
size_t theSize) | |||
@@ -229,7 +221,6 @@ size_t wave_write(void* theHandler, | |||
return num; | |||
} | |||
//> | |||
// wave_close | |||
// | |||
// DESCRIPTION: | |||
@@ -257,8 +248,6 @@ size_t wave_write(void* theHandler, | |||
// | |||
// The result of the ioctl call (non-0 means failure) | |||
// | |||
//<wave_close | |||
int wave_close(void* theHandler) | |||
{ | |||
int ret; | |||
@@ -285,7 +274,6 @@ int wave_close(void* theHandler) | |||
return ret; | |||
} | |||
//> | |||
// wave_is_busy | |||
// | |||
// DESCRIPTION: | |||
@@ -305,8 +293,6 @@ int wave_close(void* theHandler) | |||
// | |||
// A non-0 value if audio is being played | |||
// | |||
//<wave_is_busy | |||
int wave_is_busy(void* theHandler) | |||
{ | |||
uint32_t time; | |||
@@ -318,7 +304,6 @@ int wave_is_busy(void* theHandler) | |||
return time != 0; | |||
} | |||
//> | |||
// wave_terminate | |||
// | |||
// DESCRIPTION: | |||
@@ -329,8 +314,6 @@ int wave_is_busy(void* theHandler) | |||
// | |||
// sun_audio_fd: modified - closed and set to -1 | |||
// | |||
//<wave_terminate | |||
void wave_terminate() | |||
{ | |||
ENTER("wave_terminate"); | |||
@@ -339,7 +322,6 @@ void wave_terminate() | |||
SHOW_TIME("wave_terminate > LEAVE"); | |||
} | |||
//> | |||
// wave_flush | |||
// | |||
// DESCRIPTION: | |||
@@ -355,16 +337,12 @@ void wave_terminate() | |||
// | |||
// theHandler: the audio device file descriptor | |||
// | |||
//<wave_flush | |||
void wave_flush(void* theHandler) | |||
{ | |||
ENTER("wave_flush"); | |||
//ioctl((int) theHandler, AUDIO_DRAIN, 0); | |||
SHOW_TIME("wave_flush > LEAVE"); | |||
} | |||
//> | |||
// wave_set_callback_is_output_enabled | |||
// | |||
// DESCRIPTION: | |||
@@ -377,14 +355,11 @@ void wave_flush(void* theHandler) | |||
// | |||
// cb: the callback to call from wave_write | |||
// | |||
//<wave_set_callback_is_output_enabled | |||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | |||
{ | |||
my_callback_is_output_enabled = cb; | |||
} | |||
//> | |||
// wave_test_get_write_buffer | |||
// | |||
// DESCRIPTION: | |||
@@ -396,14 +371,11 @@ void wave_set_callback_is_output_enabled(t_wave_callback* cb) | |||
// | |||
// NULL | |||
// | |||
//<wave_test_get_write_buffer | |||
void *wave_test_get_write_buffer() | |||
{ | |||
return NULL; | |||
} | |||
//> | |||
// wave_get_read_position | |||
// | |||
// DESCRIPTION: | |||
@@ -423,8 +395,6 @@ void *wave_test_get_write_buffer() | |||
// The total number of 16-bit samples played by the audio system | |||
// so far. | |||
// | |||
//<wave_get_read_position | |||
uint32_t wave_get_read_position(void* theHandler) | |||
{ | |||
audio_info_t ainfo; | |||
@@ -435,7 +405,6 @@ uint32_t wave_get_read_position(void* theHandler) | |||
return ainfo.play.samples; | |||
} | |||
//> | |||
// wave_get_write_position | |||
// | |||
// DESCRIPTION: | |||
@@ -462,8 +431,6 @@ uint32_t wave_get_read_position(void* theHandler) | |||
// the index wraps back to 0. We don't handle that wrapping, so | |||
// the behavior after 54 hours of play time is undefined.]]] | |||
// | |||
//<wave_get_write_position | |||
uint32_t wave_get_write_position(void* theHandler) | |||
{ | |||
ENTER("wave_get_write_position"); | |||
@@ -472,7 +439,6 @@ uint32_t wave_get_write_position(void* theHandler) | |||
return total_samples_sent; | |||
} | |||
//> | |||
// wave_get_remaining_time | |||
// | |||
// DESCRIPTION: | |||
@@ -500,8 +466,6 @@ uint32_t wave_get_write_position(void* theHandler) | |||
// Time in milliseconds before the sample is played or 0 if the sample | |||
// is currently playing or has already been played. | |||
// | |||
//<wave_get_remaining_time | |||
int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
{ | |||
uint32_t a_time=0; | |||
@@ -533,7 +497,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
} | |||
#else | |||
// notdef USE_SADA | |||
int wave_init(int srate) {return 1;} | |||

void* wave_open(const char* the_api) {return (void *)1;} | |||
@@ -557,9 +520,6 @@ int wave_get_remaining_time(uint32_t sample, uint32_t* time) | |||
#endif // of USE_PORTAUDIO | |||
//> | |||
//<clock_gettime2, add_time_in_ms | |||
void clock_gettime2(struct timespec *ts) | |||
{ | |||
struct timeval tv; | |||
@@ -592,5 +552,3 @@ void add_time_in_ms(struct timespec *ts, int time_in_ms) | |||
} | |||
#endif // USE_ASYNC | |||
//> |
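The SADA back-end above only needs two counters to answer wave_get_remaining_time(): the number of samples handed to the audio device so far and the play.samples count reported by AUDIO_GETINFO. Assuming that same bookkeeping, the core arithmetic reduces to the sketch below; remaining_time_ms, samples_played and SAMPLE_RATE are illustrative names and values, not taken from the file.

#include <stdint.h>

#define SAMPLE_RATE 22050u    // assumed synthesis rate

// Milliseconds until the 16-bit sample with absolute index 'sample' is played,
// given how many samples the device reports as already consumed.
static uint32_t remaining_time_ms(uint32_t sample, uint32_t samples_played)
{
    if (sample <= samples_played)
        return 0;                                   // already played (or playing now)
    return (uint32_t)(((uint64_t)(sample - samples_played) * 1000u) / SAMPLE_RATE);
}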
@@ -851,7 +851,6 @@ int PeaksToHarmspect(wavegen_peaks_t *peaks, int pitch, int *htab, int control) | |||
// restrict highest harmonic to half the samplerate | |||
hmax_samplerate = (((samplerate * 19)/40) << 16)/pitch; // only 95% of Nyquist freq | |||
// hmax_samplerate = (samplerate << 16)/(pitch*2); | |||
if(hmax > hmax_samplerate) | |||
hmax = hmax_samplerate; | |||
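The `<<16` here suggests the pitch argument reaches PeaksToHarmspect() as the fundamental frequency in 16.16 fixed point, so the clamp is simply "how many whole harmonics fit below 95% of the Nyquist frequency". A worked instance with assumed values:

// Assumed illustration: samplerate = 22050 Hz, fundamental = 120 Hz (pitch = 120 << 16).
//   hmax_samplerate = (((22050 * 19) / 40) << 16) / (120 << 16)
//                   = 10473 / 120
//                   = 87
// i.e. at most 87 harmonics; the highest sits near 87 * 120 = 10440 Hz,
// just below 0.95 * 11025 = 10473.75 Hz.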
@@ -1047,19 +1046,14 @@ static void setresonator(RESONATOR *rp, int freq, int bwidth, int init) | |||
rp->x2 = 0; | |||
} | |||
// x = exp(-pi * bwidth * t) | |||
arg = minus_pi_t * bwidth; | |||
x = exp(arg); | |||
// c = -(x*x) | |||
rp->c = -(x * x); | |||
// b = x * 2*cos(2 pi * freq * t) | |||
arg = two_pi_t * freq; | |||
rp->b = x * cos(arg) * 2.0; | |||
// a = 1.0 - b - c | |||
rp->a = 1.0 - rp->b - rp->c; | |||
} // end of setresonator | |||
#endif | |||
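setresonator() above derives the standard two-pole resonator coefficients from a centre frequency and bandwidth (with t = 1/samplerate); what the removed comments don't show is how a, b and c are then applied per sample. A self-contained sketch of that recurrence follows, with resonator_t, resonator_set, resonator_step and SR as assumed names rather than the file's own.

#include <math.h>

#define SR 22050.0                       // assumed sample rate

typedef struct { double a, b, c, y1, y2; } resonator_t;

static void resonator_set(resonator_t *rp, double freq, double bwidth)
{
    double t = 1.0 / SR;
    double x = exp(-M_PI * bwidth * t);          // pole radius
    rp->c = -(x * x);
    rp->b = 2.0 * x * cos(2.0 * M_PI * freq * t);
    rp->a = 1.0 - rp->b - rp->c;                 // unity gain at DC (Klatt-style normalisation)
    rp->y1 = rp->y2 = 0.0;
}

// y[n] = a*x[n] + b*y[n-1] + c*y[n-2]
static double resonator_step(resonator_t *rp, double input)
{
    double out = rp->a * input + rp->b * rp->y1 + rp->c * rp->y2;
    rp->y2 = rp->y1;                             // shift the output delay line
    rp->y1 = out;
    return out;
}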
@@ -1167,7 +1161,6 @@ int Wavegen() | |||
maxh2 = PeaksToHarmspect(peaks, wdata.pitch<<4, hspect[0], 0); | |||
// adjust amplitude to compensate for fewer harmonics at higher pitch | |||
// amplitude2 = (wdata.amplitude * wdata.pitch)/(100 << 11); | |||
amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | |||
// switch sign of harmonics above about 900Hz, to reduce max peak amplitude | |||
@@ -1221,7 +1214,6 @@ int Wavegen() | |||
} | |||
// adjust amplitude to compensate for fewer harmonics at higher pitch | |||
// amplitude2 = (wdata.amplitude * wdata.pitch)/(100 << 11); | |||
amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | |||
if(glottal_flag > 0) | |||
@@ -1256,8 +1248,6 @@ int Wavegen() | |||
if((ix = amp_ix>>8) > 127) ix = 127; | |||
amp = amplitude_env[ix]; | |||
amplitude2 = (amplitude2 * amp)/128; | |||
// if(amp < 255) | |||
// modulation_type = 7; | |||
} | |||
// introduce roughness into the sound by reducing the amplitude of | |||
@@ -1602,7 +1592,6 @@ void WavegenSetVoice(voice_t *v) | |||
WavegenSetEcho(); | |||
SetPitchFormants(); | |||
MarkerEvent(espeakEVENT_SAMPLERATE, 0, wvoice->samplerate, 0, out_ptr); | |||
// WVoiceChanged(wvoice); | |||
} | |||
@@ -1698,7 +1687,6 @@ void SetSynth(int length, int modn, frame_t *fr1, frame_t *fr2, voice_t *v) | |||
int qix; | |||
int cmd; | |||
static int glottal_reduce_tab1[4] = {0x30, 0x30, 0x40, 0x50}; // vowel before [?], amp * 1/256 | |||
// static int glottal_reduce_tab1[4] = {0x30, 0x40, 0x50, 0x60}; // vowel before [?], amp * 1/256 | |||
static int glottal_reduce_tab2[4] = {0x90, 0xa0, 0xb0, 0xc0}; // vowel after [?], amp * 1/256 | |||
#ifdef LOG_FRAMES |