if(which==1)
{
// instn = i_VOWELIN;
len = 50 / 2; // defaults for transition into vowel
rms = 25 / 2;
scale_factor = (max / 127) + 1;
//fprintf(f_errors," sample len=%d max=%4x shift=%d\n",length,max,scale_factor);
#define MIN_FACTOR -1 // was 6, disable use of 16 bit samples
if(scale_factor > MIN_FACTOR)
{
}
Write4Bytes(f_phdata,length);
// fwrite(&length,4,1,f_phdata);
fseek(f,44,SEEK_SET);
while(!feof(f))
return(0);
}
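// Illustrative sketch (not part of the original source): the fragment above derives
// scale_factor from the peak sample value so that 16-bit data can be reduced towards
// an 8-bit range. A minimal stand-alone version of that idea is shown below; the
// function name and the use of plain division are assumptions, the original may apply
// the factor differently (the debug line above calls it "shift").
#include <stdint.h>
static void downscale_to_8bit(const int16_t *in, int8_t *out, int n, int peak)
{
    int scale_factor = (peak / 127) + 1;        // same formula as above
    for (int i = 0; i < n; i++)
        out[i] = (int8_t)(in[i] / scale_factor); // peak now maps to just under +/-128
}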
// count_VowelStart = 0;
// count_VowelEnding = 0;
if(type == 1)
*prog_out++ = i_SWITCH_PREVVOWEL+6;
if(type == 2)
break;
}
}
// fprintf(f_errors,"tune %s (%d)\n", new_tune.name, tune_number);
if(found == 2)
{
error("Duplicate tune name: '%s'",new_tune.name);
phonetic = word = nullstring;
p = linebuf;
// while(isspace2(*p)) p++;
step = 0;
{
multiple_numeric_hyphen = 1;
}
// else // ???
flag_codes[n_flag_codes++] = BITNUM_FLAG_HYPHENATED;
c = ' ';
}
if(isspace2(c))
error_need_dictionary++;
fprintf(f_log,"%5d: Need to compile dictionary again\n",linenum);
}
{
//char decoded_phonemes[128];
//DecodePhonemes(word_phonemes,decoded_phonemes);
//printf("Translator %x %s [%s] [%s]\n",translator->translator_name,word,phonetic,decoded_phonemes);
}
}
else
{
dsource = "";
f_log = log;
//f_log = fopen("log2.txt","w");
if(f_log == NULL)
f_log = stderr;
gettimeofday(&tv, NULL);
// fd_log = fopen(FILENAME,"a");
if (!fd_log)
{
debug_init();
if (fd_log)
{
fprintf(fd_log, "%03d.%03dms > ENTER %s\n",(int)(tv.tv_sec%1000), (int)(tv.tv_usec/1000), text);
// fclose(fd_log);
}
}
{
va_list args;
va_start(args, format);
// fd_log = fopen(FILENAME,"a");
if (!fd_log)
{
debug_init();
if (fd_log)
{
vfprintf(fd_log, format, args);
// fclose(fd_log);
}
va_end(args);
}
gettimeofday(&tv, NULL);
// fd_log = fopen(FILENAME,"a");
if (!fd_log)
{
debug_init();
if (fd_log)
{
fprintf(fd_log, "%03d.%03dms > %s\n",(int)(tv.tv_sec%1000), (int)(tv.tv_usec/1000), text);
// fclose(fd_log);
}
}
{
#endif
//#define DEBUG_ENABLED
#ifdef DEBUG_ENABLED
#define ENTER(text) debug_enter(text)
#define SHOW(format,...) debug_show(format,__VA_ARGS__);
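// Illustrative sketch (assumption, not the original header): when DEBUG_ENABLED is not
// defined, macros like these are normally mapped to empty bodies so that the ENTER()/
// SHOW()/SHOW_TIME() calls scattered through the sources compile away to nothing.
// Kept under "#if 0" here so it cannot interfere with the real definitions above.
#if 0
#define ENTER(text)
#define SHOW(format,...)
#define SHOW_TIME(text)
#endif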
extern char *DecodeRule(const char *group_chars, int group_length, char *rule, int control);
// accented characters which indicate (in some languages) the start of a separate syllable
//static const unsigned short diereses_list[7] = {L'ä',L'ë',L'ï',L'ö',L'ü',L'ÿ',0};
static const unsigned short diereses_list[7] = {0xe4,0xeb,0xef,0xf6,0xfc,0xff,0};
// convert characters to an approximate 7 bit ascii equivalent
if(c != 0)
{
buf += utf8_out(c, buf);
// if(separate_phonemes)
// *buf++ = separate_phonemes;
}
}
}
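// Illustrative sketch (assumption): one way to implement the "approximate 7 bit ascii"
// conversion mentioned above is a small table of accented code points and their base
// letters, with the result written out via utf8_out() as in the loop above. The helper
// below is hypothetical, not the original implementation.
static char approx_ascii(unsigned short c)
{
    static const unsigned short accented[] = {0xe4,0xeb,0xef,0xf6,0xfc,0xff,0}; // as in diereses_list[]
    static const char base[] = "aeiouy";
    int i;
    for (i = 0; accented[i] != 0; i++)
        if (c == accented[i])
            return base[i];              // strip the diaeresis
    return (c < 0x80) ? (char)c : '?';   // plain ASCII unchanged, anything else substituted
}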
{
int wt;
int max_weight = -1;
// int prev_stressed;
// find the heaviest syllable, excluding the final syllable
for(ix = 1; ix < (vowel_count-1); ix++)
if((wt = syllable_weight[ix]) >= max_weight)
{
max_weight = wt;
// prev_stressed = stressed_syllable;
stressed_syllable = ix;
}
}
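// Illustrative sketch (assumption): the loop above remembers the index of the heaviest
// syllable while skipping the final one; because the test is ">=", a later syllable
// wins a tie. The same logic as a stand-alone helper with hypothetical names:
static int heaviest_syllable(const int *syllable_weight, int vowel_count)
{
    int max_weight = -1;
    int stressed_syllable = 1;
    for (int ix = 1; ix < (vowel_count - 1); ix++)
        if (syllable_weight[ix] >= max_weight)
        {
            max_weight = syllable_weight[ix];
            stressed_syllable = ix;      // ties resolved in favour of the later syllable
        }
    return stressed_syllable;
}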
if((ph = phoneme_tab[phcode]) == NULL)
continue;
// if(ph->type == phSTRESS)
// continue;
if(ph->type == phPAUSE)
{
tr->prev_last_stress = 0;
for(ix=0; ix <= skipwords; ix++)
{
if(wtab[ix].flags & FLAG_EMPHASIZED2)
// if(((wflags2 = wtab[ix].flags) & FLAG_EMPHASIZED2) || ((ix > 0) && (wflags2 & FLAG_EMBEDDED)))
{
condition_failed = 1;
}
fprintf(f_trans,"Replace: %s %s\n",word,*wordptr);
}
}
else
{
// flags[0] &= ~FLAG_SKIPWORDS; // check lang=hu január 21.-ig (error: suffix repeated ??)
}
ph_out[0] = 0;
return(0);
};
static const char *add_e_additions[] = {
// "c", "rs", "ir", "ur", "ath", "ns", "lu", NULL };
"c", "rs", "ir", "ur", "ath", "ns", "u", NULL
};
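// Illustrative sketch (assumption): a NULL-terminated list such as add_e_additions[]
// is normally scanned by comparing each entry against the tail of the word stem. The
// helper below only illustrates that scan; it is not the original lookup code.
#include <string.h>
static int ends_with_any(const char *word, const char *endings[])
{
    size_t wlen = strlen(word);
    for (int i = 0; endings[i] != NULL; i++)
    {
        size_t elen = strlen(endings[i]);
        if (wlen >= elen && strcmp(word + wlen - elen, endings[i]) == 0)
            return 1;   // stem matches one of the listed endings, so restore the final 'e'
    }
    return 0;
}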
if((strcmp(ending,"s")==0) || (strcmp(ending,"es")==0))
end_flags |= FLAG_SUFX_S;
// if(strcmp(ending,"'s")==0)
if(ending[0] == '\'')
end_flags &= ~FLAG_SUFX; // don't consider 's as an added suffix
#include "debug.h"
static unsigned int my_current_text_id=0;
//<create_espeak_text
t_espeak_command* create_espeak_text(const void *text, size_t size, unsigned int position, espeak_POSITION_TYPE position_type, unsigned int end_position, unsigned int flags, void* user_data)
{
ENTER("create_espeak_text");
return a_command;
}
//>
t_espeak_command* create_espeak_terminated_msg(unsigned int unique_identifier, void* user_data)
{
ENTER("create_espeak_terminated_msg");
}
//<create_espeak_mark
t_espeak_command* create_espeak_mark(const void *text, size_t size, const char *index_mark, unsigned int end_position, unsigned int flags, void* user_data)
{
ENTER("create_espeak_mark");
return a_command;
}
//>
//< create_espeak_key, create_espeak_char
t_espeak_command* create_espeak_key(const char *key_name, void *user_data)
{
return a_command;
}
//>
//< create_espeak_parameter
t_espeak_command* create_espeak_parameter(espeak_PARAMETER parameter, int value, int relative)
{
ENTER("create_espeak_parameter");
return a_command;
}
//>
//< create_espeak_punctuation_list
t_espeak_command* create_espeak_punctuation_list(const wchar_t *punctlist)
{
ENTER("create_espeak_punctuation_list");
int a_error=1;
// wchar_t *a_list = NULL;
t_espeak_command* a_command = (t_espeak_command*)malloc(sizeof(t_espeak_command));
if (!punctlist || !a_command)
return a_command;
}
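// Illustrative sketch (assumption): after allocating the command object above,
// create_espeak_punctuation_list() still needs its own copy of the wide-character
// list. A typical copy step looks like this; the helper name is hypothetical and the
// real t_espeak_command layout is defined elsewhere.
#include <stdlib.h>
#include <wchar.h>
static wchar_t *copy_punctlist(const wchar_t *punctlist)
{
    size_t n = wcslen(punctlist) + 1;              // include the terminating L'\0'
    wchar_t *a_list = (wchar_t *)malloc(n * sizeof(wchar_t));
    if (a_list != NULL)
        wmemcpy(a_list, punctlist, n);
    return a_list;                                 // caller is responsible for freeing
}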
//>
//< create_espeak_voice_name, create_espeak_voice_spec
t_espeak_command* create_espeak_voice_name(const char *name)
{
ENTER("create_espeak_voice_name");
return a_command;
}
//>
//< delete_espeak_command
int delete_espeak_command( t_espeak_command* the_command)
{
ENTER("delete_espeak_command");
}
return a_status;
}
//>
//< process_espeak_command
void process_espeak_command( t_espeak_command* the_command)
{
ENTER("process_espeak_command");
}
}
//>
//< display_espeak_command
void display_espeak_command( t_espeak_command* the_command)
{
ENTER("display_espeak_command");
}
#endif
}
//>
// This source file is only used for asynchronous modes
//<includes
#ifndef PLATFORM_WINDOWS
#include <unistd.h>
#endif
#include "event.h"
#include "wave.h"
#include "debug.h"
//>
//<decls and function prototypes
// my_mutex: protects my_thread_is_talking,
static pthread_mutex_t my_mutex;
static void init();
static void* polling_thread(void*);
//>
//<event_init
void event_set_callback(t_espeak_callback* SynthCallback)
{
my_callback = SynthCallback;
assert(thread_inited);
pthread_attr_destroy(&a_attrib);
}
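// Illustrative sketch (assumption): event_set_callback() above ends by destroying a
// pthread attribute object, which suggests the usual create-with-attributes pattern
// for starting polling_thread(). A minimal version of that pattern (only my_mutex and
// polling_thread come from the declarations above; the rest is hypothetical):
#include <pthread.h>
static void start_polling_thread(void)
{
    pthread_t a_thread;
    pthread_attr_t a_attrib;
    pthread_mutex_init(&my_mutex, NULL);
    pthread_attr_init(&a_attrib);
    pthread_attr_setdetachstate(&a_attrib, PTHREAD_CREATE_JOINABLE);
    pthread_create(&a_thread, &a_attrib, polling_thread, NULL);
    pthread_attr_destroy(&a_attrib);               // mirrors the call above
}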
//>
//<event_display
static void event_display(espeak_EVENT* event)
{
ENTER("event_display");
}
#endif
}
//>
//<event_copy
static espeak_EVENT* event_copy (espeak_EVENT* event)
{
return a_event;
}
//>
//<event_notify
// Call the user supplied callback
//
// Note: the current sequence is:
case espeakEVENT_END:
case espeakEVENT_PHONEME:
{
// jonsd - I'm not sure what this is for. gilles says it's for when Gnome Speech reads a file of blank lines
if (a_old_uid != event->unique_identifier)
{
espeak_EVENT_TYPE a_new_type = events[0].type;
}
}
}
//>
//<event_delete
static int event_delete(espeak_EVENT* event)
{
return 1;
}
//>
//<event_declare
espeak_ERROR event_declare (espeak_EVENT* event)
{
ENTER("event_declare");
a_status = pthread_mutex_unlock(&my_mutex);
}
// TBD: remove the comment
// reminder: code in comment.
// This wait can lead to an underrun
//
// if (!a_status && !my_event_is_running && (a_error == EE_OK))
// {
// // quit when command is actually started
// // (for possible forthcoming 'end of command' checks)
SHOW_TIME("event_declare > post my_sem_start_is_required\n");
sem_post(&my_sem_start_is_required);
// int val=1;
// while (val)
// {
// usleep(50000); // TBD: event?
// sem_getvalue(&my_sem_start_is_required, &val);
// }
// }
if (a_status != 0)
{
return a_error;
}
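// Illustrative sketch (assumption): event_declare() above wakes the polling thread by
// posting my_sem_start_is_required. The two sides of that handshake reduce to a plain
// POSIX semaphore pair; the demo below is self-contained and hypothetical.
#include <semaphore.h>
static sem_t demo_start_is_required;
static void demo_init(void)
{
    sem_init(&demo_start_is_required, 0, 0);  // initial value 0: nothing pending yet
}
static void demo_producer(void)
{
    sem_post(&demo_start_is_required);   // "work is available", as in event_declare() above
}
static void demo_consumer(void)
{
    sem_wait(&demo_start_is_required);   // polling thread blocks here until posted
}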
//>
//<event_clear_all
espeak_ERROR event_clear_all ()
{
ENTER("event_clear_all");
return EE_OK;
}
//>
//<sleep_until_timeout_or_stop_request
static int sleep_until_timeout_or_stop_request(uint32_t time_in_ms)
{
ENTER("sleep_until_timeout_or_stop_request");
return a_stop_is_required;
}
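// Illustrative sketch (assumption): a common way to implement a function like
// sleep_until_timeout_or_stop_request() is sem_timedwait() on a "stop" semaphore with
// an absolute deadline built from the relative time_in_ms. Names are hypothetical;
// the original may well be structured differently.
#include <errno.h>
#include <semaphore.h>
#include <stdint.h>
#include <time.h>
static int wait_stop_or_timeout(sem_t *sem_stop_is_required, uint32_t time_in_ms)
{
    struct timespec ts;
    int r;
    clock_gettime(CLOCK_REALTIME, &ts);            // sem_timedwait() uses CLOCK_REALTIME
    ts.tv_sec += time_in_ms / 1000;
    ts.tv_nsec += (long)(time_in_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L)
    {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }
    while ((r = sem_timedwait(sem_stop_is_required, &ts)) == -1 && errno == EINTR)
        ;                                          // restart if interrupted by a signal
    return (r == 0);                               // non-zero: a stop was requested before the timeout
}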
//>
//<get_remaining_time
// Returns the time interval required to reach the given sample.
// If the stream is open but the audio samples are not being played,
// a timeout is started.
return err;
}
//>
//<polling_thread
static void* polling_thread(void*p)
{
ENTER("polling_thread");
return NULL;
}
//>
//<push, pop, init
enum {MAX_NODE_COUNTER=1000};
// return 1 if ok, 0 otherwise
static espeak_ERROR push(void* the_data)
{
ENTER("event > push");
node_counter = 0;
}
//>
//<event_terminate
void event_terminate()
{
ENTER("event_terminate");
thread_inited = 0;
}
}
//>
// This source file is only used for asynchronous modes
//<includes
#ifndef PLATFORM_WINDOWS
#include <unistd.h>
#endif
#include "debug.h"
//>
//<decls and function prototypes
// my_mutex: protects my_thread_is_talking,
// my_stop_is_required, and the command fifo
static pthread_mutex_t my_mutex;
MAX_INACTIVITY_CHECK=2
};
//>
//<fifo_init
void fifo_init()
{
ENTER("fifo_init");
}
SHOW_TIME("fifo > get my_sem_stop_is_acknowledged\n");
}
//>
//<fifo_add_command
espeak_ERROR fifo_add_command (t_espeak_command* the_command)
{
return a_error;
}
//>
//<fifo_add_commands
espeak_ERROR fifo_add_commands (t_espeak_command* command1, t_espeak_command* command2)
{
ENTER("fifo_add_commands");
return a_error;
}
//>
//<fifo_stop
espeak_ERROR fifo_stop ()
{
ENTER("fifo_stop");
return EE_OK;
}
//>
//<fifo_is_busy
int fifo_is_busy ()
{
// ENTER("isSpeaking");
// int aResult = (int) (my_command_is_running || WaveIsPlaying());
SHOW("fifo_is_busy > aResult = %d\n",my_command_is_running);
return my_command_is_running;
}
// int pause ()
// {
// ENTER("pause");
// // TBD
// // if (espeakPause (espeakHandle, 1))
// return true;
// }
// int resume ()
// {
// ENTER("resume");
// // TBD
// // if (espeakPause (espeakHandle, 0))
// return true;
// }
//>
//<sleep_until_start_request_or_inactivity
static int sleep_until_start_request_or_inactivity()
{
SHOW_TIME("fifo > sleep_until_start_request_or_inactivity > ENTER");
return a_start_is_required;
}
//>
//<close_stream
static void close_stream()
{
SHOW_TIME("fifo > close_stream > ENTER\n");
SHOW_TIME("fifo > close_stream > LEAVE\n");
}
//>
//<say_thread
static void* say_thread(void*p)
{
ENTER("say_thread");
return (0 == my_stop_is_required);
}
//>
//<fifo
typedef struct t_node
{
t_espeak_command* data;
static node* head=NULL;
static node* tail=NULL;
// return 1 if ok, 0 otherwise
static espeak_ERROR push(t_espeak_command* the_command)
{
ENTER("fifo > push");
return the_command;
}
static void init(int process_parameters)
{
// Changed by Tyler Spivey 30.Nov.2011
t_espeak_command *c = NULL;
ENTER("fifo > init");
c = pop();
node_counter = 0;
}
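// Illustrative sketch (assumption): the t_node type above is the usual singly linked
// FIFO with head and tail pointers. A minimal push/pop pair for such a queue, with
// hypothetical names and without the locking and error codes of the real fifo:
#include <stdlib.h>
typedef struct demo_node {
    void *data;
    struct demo_node *next;
} demo_node;
static demo_node *demo_head = NULL;
static demo_node *demo_tail = NULL;

static int demo_push(void *data)       // returns 1 if ok, 0 otherwise
{
    demo_node *n = (demo_node *)malloc(sizeof(demo_node));
    if (n == NULL)
        return 0;
    n->data = data;
    n->next = NULL;
    if (demo_tail != NULL)
        demo_tail->next = n;           // append after the current tail
    else
        demo_head = n;                 // queue was empty
    demo_tail = n;
    return 1;
}

static void *demo_pop(void)            // returns NULL if the queue is empty
{
    if (demo_head == NULL)
        return NULL;
    demo_node *n = demo_head;
    void *data = n->data;
    demo_head = n->next;
    if (demo_head == NULL)
        demo_tail = NULL;
    free(n);
    return data;
}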
//>
//<fifo_terminate
void fifo_terminate()
{
ENTER("fifo_terminate");
init(0); // purge fifo
}
//>
syl = &syllable_tab[ix];
stress = syl->stress;
// if(stress == PRIMARY_MARKED)
// initial = 1; // reset the intonation pattern
if(initial || (stress >= min_stress))
{
// a primary stress
/* tonic syllable */
/******************/
// if(tn->flags & T_EMPH)
// {
// syllable_tab[ix].flags |= SYL_EMPHASIS;
// }
if(number_tail == 0)
{
tone_pitch_env = tune->nucleus0_env;
tone_posn = tone_posn2; // put tone on the penultimate stressed word
}
ix = calc_pitch_segment(ix,tone_posn, th, tn, PRIMARY, continuing);
// ix = SetBodyIntonation(&tunes[0], ix, tone_posn, 0);
if(no_tonic)
return(0);
PHONEME_TAB *tph;
PHONEME_TAB *prev_tph; // forget across word boundary
PHONEME_TAB *prevw_tph; // remember across word boundary
// PHONEME_TAB *prev2_tph; // 2 tones previous
PHONEME_LIST *prev_p;
int pitch_adjust = 0; // pitch gradient through the clause - initial value
}
prev_p = p;
// prev2_tph = prevw_tph;
prevw_tph = prev_tph = tph;
pause = 0;
}
#include "synthesize.h"
#include "voice.h"
extern unsigned char *out_ptr;
extern unsigned char *out_start;
extern unsigned char *out_end;
extern WGEN_DATA wdata;
fla = (double) kt_globals.f0_flutter / 50;
flb = (double) kt_globals.original_f0 / 100;
// flc = sin(2*PI*12.7*time_count);
// fld = sin(2*PI*7.1*time_count);
// fle = sin(2*PI*4.7*time_count);
flc = sin(PI*12.7*time_count); // because we are calling flutter() more frequently, every 2.9 ms
fld = sin(PI*7.1*time_count);
fle = sin(PI*4.7*time_count);
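// Illustrative sketch (assumption): the three sinusoids above implement Klatt-style F0
// "flutter", a slow quasi-random wobble added to the fundamental. Collected into one
// expression, keeping the halved frequencies noted above (flutter() is called every
// 2.9 ms); the final scaling into F0 units is omitted here and the names are hypothetical.
#include <math.h>
#ifndef PI
#define PI 3.14159265358979323846
#endif
static double flutter_delta_f0(int f0_flutter, int original_f0, double time_count)
{
    double fla = (double)f0_flutter / 50;
    double flb = (double)original_f0 / 100;
    double flc = sin(PI * 12.7 * time_count);
    double fld = sin(PI * 7.1 * time_count);
    double fle = sin(PI * 4.7 * time_count);
    return fla * flb * (flc + fld + fle);   // added to the current F0 target
}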
if (kt_globals.nopen >= (kt_globals.T0-1))
{
// printf("Warning: glottal open period cannot exceed T0, truncated\n");
kt_globals.nopen = kt_globals.T0 - 2;
}
if (kt_globals.nopen < 40)
{
/* F0 max = 1000 Hz */
// printf("Warning: minimum glottal open period is 10 samples.\n");
// printf("truncated, nopen = %d\n",kt_globals.nopen);
kt_globals.nopen = 40;
}
temp = kt_globals.T0 - kt_globals.nopen;
if (frame->Kskew > temp)
{
// printf("Kskew duration=%d > glottal closed period=%d, truncate\n", frame->Kskew, kt_globals.T0 - kt_globals.nopen);
frame->Kskew = temp;
}
if (skew >= 0)
f = -f;
//NOTE, changes made 30.09.2011 for Reece Dunn <[email protected]>
// fix a sound spike when f=0
/* First compute ordinary resonator coefficients */
/* Let r = exp(-pi bw t) */
arg = kt_globals.minus_pi_t * bw;
klattp1[ix] = klattp[ix] = fr1->klattp[ix];
klattp_inc[ix] = (double)((fr2->klattp[ix] - klattp[ix]) * STEPSIZE)/length;
}
// get klatt parameter adjustments for the voice
// if((ix>0) && (ix < KLATT_AVp))
// klattp1[ix] = klattp[ix] = (klattp[ix] + wvoice->klattv[ix]);
}
nsamples = length;
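// Illustrative sketch (assumption): klattp_inc[] above is a per-step increment for
// linearly interpolating each Klatt parameter from frame fr1 to frame fr2 over
// 'length' samples, advancing once every STEPSIZE samples. In isolation the scheme
// looks like this (hypothetical names):
static void interpolate_param(double start, double end, int length, int stepsize)
{
    double value = start;
    double inc = (end - start) * stepsize / length;  // same form as klattp_inc[] above
    for (int pos = 0; pos < length; pos += stepsize)
    {
        // ... synthesise 'stepsize' samples using 'value' here ...
        value += inc;                                // reaches 'end' at the last step
    }
    (void)value;
}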
int ix;
for(ix=0; ix<256; ix++)
{
// TEST: Overwrite natural_samples2
// sawtooth wave
// natural_samples2[ix] = (128-ix) * 20;
}
sample_count=0;
kt_globals.synthesis_model = CASCADE_PARALLEL;
CAPITAL,
LETTER('z',M_CARON,0),
LETTER('s',M_NAME,0), // long-s // U+17f
// LETTER('b',M_STROKE,0),
};
0, // open-e
LETTER(L_OPEN_E,M_REVERSED,0),
LETTER(L_OPEN_E,M_HOOK,M_REVERSED),
0,//LETTER(L_OPEN_E,M_CLOSED,M_REVERSED),
LETTER('j',M_BAR,0),
LETTER('g',M_IMPLOSIVE,0), // U+260
LETTER('g',0,0),
0, // ramshorn
LETTER('h',M_TURNED,0),
LETTER('h',M_HOOK,0),
0,//LETTER(L_HENG,M_HOOK,0),
LETTER('i',M_BAR,0), // U+268
LETTER(L_IOTA,0,0),
LETTER('i',M_SMALLCAP,0),
LETTER('l',M_RETROFLEX,0),
LIGATURE('l','z',0),
LETTER('m',M_TURNED,0),
0,//LETTER('m',M_TURNED,M_LEG), // U+270
LETTER('m',M_HOOK,0),
0,//LETTER('n',M_LEFTHOOK,0),
LETTER('n',M_RETROFLEX,0),
LETTER('n',M_SMALLCAP,0),
LETTER('o',M_BAR,0),
LIGATURE('o','e',M_SMALLCAP),
0,//LETTER(L_OMEGA,M_CLOSED,0),
LETTER(L_PHI,0,0), // U+278
LETTER('r',M_TURNED,0),
LETTER(L_RLONG,M_TURNED,0),
LETTER('r',M_RETROFLEX,M_TURNED),
0,//LETTER('r',M_LEG,0),
LETTER('r',M_RETROFLEX,0),
0, // r-tap
LETTER(L_RTAP,M_REVERSED,0),
LETTER('r',M_TURNED,M_SMALLCAP),
LETTER('s',M_RETROFLEX,0),
0, // esh
LETTER('j',M_HOOK,0), //LETTER('j',M_HOOK,M_BAR),
LETTER(L_ESH,M_REVERSED,0),
LETTER(L_ESH,M_CURL,0),
LETTER('t',M_TURNED,0),
0, // glottal stop
LETTER(L_GLOTTAL,M_REVERSED,0),
LETTER(L_GLOTTAL,M_TURNED,0),
0,//LETTER('c',M_LONG,0),
0, // bilabial click // U+298
LETTER('b',M_SMALLCAP,0),
0,//LETTER(L_OPEN_E,M_CLOSED,0),
LETTER('g',M_IMPLOSIVE,M_SMALLCAP),
LETTER('h',M_SMALLCAP,0),
LETTER('j',M_CURL,0),
{
if(accent2 != 0)
{
flags2 = Lookup(tr, accents_tab[accent2].name, ph_accent2);
if(flags2 & FLAG_ACCENT_BEFORE)
{
strcpy(ph_buf,ph_accent2);
speak_letter_number = 0;
}
// if((ph_alphabet[0] != 0) && speak_letter_number)
// ph_buf[0] = 0; // don't speak "letter" if we speak alphabet name
if(speak_letter_number)
{
if(al_offset == 0x2800)
if(((tr->langopts.numbers & NUM_1900) != 0) && (hundreds == 19))
{
// speak numbers such as 1984 as years: nineteen-eighty-four
// ph_100[0] = 0; // don't say "hundred", we also need to suppress "and"
}
else if(hundreds >= 10)
{
if(prev_thousands || (word[0] != '0'))
{
// don't check for ordinal if the number has a leading zero
ordinal = CheckDotOrdinal(tr, word, &word[ix], wtab, 0);
}
if((word[ix] == '.') && !IsDigit09(word[ix+1]) && !IsDigit09(word[ix+2]) && !(wtab[1].flags & FLAG_NOSPACE))
if(thousands_inc > 0)
{
if(thousandplex > 0)
// if((thousandplex > 0) && (value < 1000))
{
if((suppress_null == 0) && (LookupThousands(tr,value,thousandplex, thousands_exact, ph_append)))
{
}
}
// if((buf_digit_lookup[0] == 0) && (*p != '0') && (dot_ordinal==0))
if((buf_digit_lookup[0] == 0) && (*p != '0'))
{
// LANG=hu ?
utf8_in(&next_char,p);
if(!iswalpha2(next_char) && (thousands_exact==0))
// if(!iswalpha2(next_char) && !((wtab[thousandplex].flags & FLAG_HYPHEN_AFTER) && (thousands_exact != 0)))
strcat(ph_out,str_pause); // don't add pause for 100s, 6th, etc.
}
insert_ph = phdata.pd_param[pd_APPENDPHONEME];
}
if(ph->phflags & phVOICED)
{
// check that a voiced consonant is preceded or followed by a vowel or liquid
// and if not, add a short schwa
// not yet implemented
}
if(deleted == 0)
{
phlist[ix].ph = ph;
phlist[ix].newword = 0;
}
// phlist[ix].length = ph->std_length;
phlist[ix].length = phdata.pd_param[i_SET_LENGTH]*2;
if((ph->code == phonPAUSE_LONG) && (option_wordgap > 0) && (plist3[1].sourceix != 0))
{
static int sayas_start;
static int ssml_ignore_l_angle = 0;
// alter tone for announce punctuation or capitals
//static const char *tone_punct_on = "\0016T"; // add reverberation, lower pitch
//static const char *tone_punct_off = "\001T\001P";
// punctuation symbols that can end a clause
static const unsigned short punct_chars[] = {',','.','?','!',':',';',
0x00a1, // inverted exclamation
f = fopen(fname,"rb");
if(f == NULL)
{
fprintf(stderr,"Can't read temp file: %s\n",fname);
return(3);
}
}
if(punct_count==1)
{
// sprintf(buf,"%s %s %s",tone_punct_on,punctname,tone_punct_off);
sprintf(buf," %s",punctname); // we need the space before punctname, to ensure it doesn't merge with the previous word (eg. "2.-a")
}
else
if(!iswspace(c1))
{
if(!IsAlpha(c1) || !iswlower2(c1))
// if(iswdigit(c1) || (IsAlpha(c1) && !iswlower2(c1)))
{
UngetC(c2);
ungot_char2 = c1;
}
if((iswspace(c2) || (punct_data & 0x8000) || IsBracket(c2) || (c2=='?') || Eof() || (c2 == ctrl_embedded))) // don't check for '-' because it prevents recognizing ':-)'
// if((iswspace(c2) || (punct_data & 0x8000) || IsBracket(c2) || (c2=='?') || (c2=='-') || Eof()))
{
// note: (c2='?') is for when a smart-quote has been replaced by '?'
is_end_clause = 1;
if(iswlower2(c_next))
{
// next word has no capital letter, this dot is probably from an abbreviation
// c1 = ' ';
is_end_clause = 0;
}
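// Illustrative sketch (assumption): the test above only keeps a '.' as a clause end if
// the following word does not start with a lower-case letter. As a stand-alone
// heuristic (using iswlower() from <wctype.h> instead of the project's iswlower2()):
#include <wctype.h>
static int dot_ends_clause(wint_t next_word_first_char)
{
    if (iswlower(next_word_first_char))
        return 0;    // next word has no capital letter: probably an abbreviation
    return 1;        // otherwise treat the '.' as the end of the clause
}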
if(any_alnum==0)
if(c1 == 0xe000 + '<') c1 = '<';
ix += utf8_out(c1,&buf[ix]); // buf[ix++] = c1;
if(!iswspace(c1) && !IsBracket(c1))
{
charix[ix] = count_characters - clause_start_char;
return;
}
#ifdef TEST_SPEED
if(wpm > 1000)
{
// TESTING
// test = wpm / 1000;
wpm = wpm % 1000;
}
#endif
if(wpm > 450)
wpm = 450;
if(wpm > 430)
{
speed.pause_factor = 12;
// speed.clause_pause_factor = 15;
}
else
if(wpm > 400)
{
speed.pause_factor = 13;
// speed.clause_pause_factor = 15;
}
else
if(wpm > 374)
speed.clause_pause_factor = 16;
}
}
#ifdef TEST_SPEED
//if(control==3)
printf("%3d: speedf %d %d %d x=%d pause=%d %d wav=%d lenmod=%d %d\n",wpm,speed1,speed2,speed3, speed_lookup[wpm2-80], speed.pause_factor,speed.clause_pause_factor, speed.wav_factor,speed.lenmod_factor,speed.lenmod2_factor);
#endif
} // end of SetSpeed
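// Illustrative sketch (assumption): the tiers above can be read as a small mapping from
// words-per-minute to the pause factors applied later. Only the thresholds that appear
// in this file are reproduced; rates of 374 wpm and below are left untouched here.
static void set_pause_factors(int wpm, int *pause_factor, int *clause_pause_factor)
{
    if (wpm > 450)
        wpm = 450;                     // clamp, as above
    if (wpm > 430)
        *pause_factor = 12;
    else if (wpm > 400)
        *pause_factor = 13;
    else if (wpm > 374)
        *clause_pause_factor = 16;
}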
#else // not using sonic speed-up
if(control == 2)
wpm = embedded_value[EMBED_S2];
#ifdef TEST_SPEED
if(wpm > 1000)
{
// TESTING
test = wpm / 1000;
wpm = wpm % 1000;
}
#endif
if(voice->speed_percent > 0)
{
wpm = (wpm * voice->speed_percent)/100;
if(wpm > 430)
{
speed.pause_factor = 12;
// speed.clause_pause_factor = 15;
}
else
if(wpm > 400)
{
speed.pause_factor = 13;
// speed.clause_pause_factor = 15;
}
else
if(wpm > 374)
speed.clause_pause_factor = 16;
}
}
#ifdef TEST_SPEED
//if(control==3)
printf("%3d: speedf %d %d %d pause=%d %d wav=%d lenmod=%d %d\n",wpm,speed1,speed2,speed3, speed.pause_factor,speed.clause_pause_factor, speed.wav_factor,speed.lenmod_factor,speed.lenmod2_factor);
#endif
} // end of SetSpeed
#endif // of INCLUDE_SONIC
if(next->ph->mnemonic == ('/'*256+'r'))
{
next->synthflags &= ~SFLAG_SEQCONTINUE;
// min_drop = 15;
}
}
}
option_phoneme_events = (options & (espeakINITIALIZE_PHONEME_EVENTS | espeakINITIALIZE_PHONEME_IPA));
VoiceReset(0);
// SetVoiceByName("default");
for(param=0; param<N_SPEECH_PARAM; param++)
param_stack[0].parameter[param] = saved_parameters[param] = param_defaults[param];
SetParameter(espeakCAPITALS,option_capitals,0);
SetParameter(espeakPUNCTUATION,option_punctuation,0);
SetParameter(espeakWORDGAP,0,0);
// DoVoiceChange(voice);
#ifdef USE_ASYNC
fifo_init();
ESPEAK_API int espeak_IsPlaying(void)
{//==================================
// ENTER("espeak_IsPlaying");
#ifdef USE_ASYNC
if((my_mode == AUDIO_OUTPUT_PLAYBACK) && wave_is_busy(my_audio))
return(1);
w=c[i+1]-d[i];
if((den=ho-hp) == 0.0)
{
// fprintf(stderr,"Error in routine 'polint'");
return(ya[2]); // two input xa are identical
}
den=w/den;
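// Illustrative sketch (assumption): the guard above protects a Neville/polint-style
// interpolation against two identical abscissae, which would make the denominator zero.
// The same concern in its simplest form, two-point linear interpolation:
static double interp_two_points(double x0, double y0, double x1, double y1, double x)
{
    if ((x0 - x1) == 0.0)
        return y1;                                 // two identical input x values
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0);  // straight line through the two points
}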
#define PLATFORM_POSIX
#define PATHSEP '/'
// USE_PORTAUDIO or USE_PULSEAUDIO are now defined in the makefile
//#define USE_PORTAUDIO
//#define USE_PULSEAUDIO
#define USE_NANOSLEEP
#define __cdecl
//#define ESPEAK_API extern "C"
#ifdef _ESPEAKEDIT
#define LOG_FRAMES // write keyframe info to log-espeakedit
else
SetParameter(espeakVOICETYPE,1,0);
strcpy(mbrola_name,mbrola_voice);
// mbrola_delay = 3800; // improve synchronization of events
mbrola_delay = 1000; // improve synchronization of events
return(EE_OK);
} // end of LoadMbrolaTable
// a pause phoneme, which has not been changed by the translation
name = '_';
len = (p->length * speed.pause_factor)/256;
// if(len == 0) continue;
if(len == 0)
len = 1;
}
InterpretPhoneme(NULL, 0, p, &phdata, NULL);
fmtp.fmt_addr = phdata.sound_addr[pd_FMT];
len = DoSpect2(p->ph, 0, &fmtp, p, -1);
// len = DoSpect(p->ph,prev->ph,phoneme_tab[phonPAUSE],2,p,-1);
len = (len * 1000)/samplerate;
if(next->type == phPAUSE)
len += 50;
syllable_centre = -1;
// initialise next_pause, a dummy phoneme_list entry
// next_pause.ph = phoneme_tab[phonPAUSE]; // this must be done after voice selection
next_pause.type = phPAUSE;
next_pause.newword = 0;
}
if(wav_scale==0)
min_length *= 2; // 16 bit samples
else
{
// increase consonant amplitude at high speeds, depending on the peak consonant amplitude
// x = ((35 - wav_scale) * speed.loud_consonants);
// if(x < 0) x = 0;
// wav_scale = (wav_scale * (x+256))/256;
}
if(std_length > 0)
{
// don't let length exceed std_length
length = std_length;
}
else
{
// reduce the reduction in length
// length = (length + std_length)/2;
}
}
if(length < min_length)
{//====================================================================================================================
int x;
//hf_reduce = 70; // ?? using fixed amount rather than the parameter??
target = (target * voice->formant_factor)/256;
x = (target - fr->ffreq[2]) / 2;
f1 = ((data2 >> 26) & 0x7);
vcolour = (data2 >> 29);
// fprintf(stderr,"FMT%d %3s %3d-%3d f1=%d f2=%4d %4d %4d f3=%4d %3d\n",
// which,WordToString(other_ph->mnemonic),len,rms,f1,f2,f2_min,f2_max,f3_adj,f3_amp);
if((other_ph != NULL) && (other_ph->mnemonic == '?'))
flags |= 8;
if(voice->klattv[0])
{
// fr->klattp[KLATT_AV] = 53; // reduce the amplitude of the start of a vowel
fr->klattp[KLATT_AV] = seq[1].frame->klattp[KLATT_AV] - 4;
}
if(f2 != 0)
if(flags & 8)
{
// set_frame_rms(fr,next_rms - 5);
modn_flags = 0x800 + (VowelCloseness(fr) << 8);
}
}
if(!next->newword)
{
if(next->type==phLIQUID) released = 1;
// if(((p->ph->phflags & phPLACE) == phPLACE_blb) && (next->ph->phflags & phSIBILANT)) released = 1;
}
if(released == 0)
p->synthflags |= SFLAG_NEXT_PAUSE;
tr->langopts.roman_suffix = "";
SetLengthMods(tr,201);
// tr->langopts.length_mods = length_mods_en;
// tr->langopts.length_mods0 = length_mods_en0;
tr->langopts.long_stop = 100;
0x1213, // тс 25076
0x1220, // яс 14310
0x7fff};
//0x040f ог 12976
//0x1306 ет 12826
//0x0f0d мо 12688
SetupTranslator(tr,stress_lengths_cy,stress_amps_cy);
tr->charset_a0 = charsets[14]; // ISO-8859-14
// tr->langopts.length_mods0 = tr->langopts.length_mods; // don't lengthen vowels in the last syllable
tr->langopts.stress_rule = STRESSPOSN_2R;
// tr->langopts.intonation_group = 4;
// 'diminished' is an unstressed final syllable
tr->langopts.stress_flags = S_FINAL_DIM_ONLY | S_FINAL_NO_2;
tr->langopts.param[LOPT_LONG_VOWEL_THRESHOLD] = 175/2;
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_SWAP_TENS | NUM_ALLOW_SPACE | NUM_ORDINAL_DOT | NUM_ROMAN;
// tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_SWAP_TENS | NUM_OMIT_1_HUNDRED | NUM_OMIT_1_THOUSAND | NUM_ALLOW_SPACE | NUM_ORDINAL_DOT | NUM_ROMAN;
SetLetterVowel(tr,'y');
tr->langopts.param[LOPT_UNPRONOUNCABLE] = 2; // use de_rules for unpronouncable rules
}
case L('e','o'):
{
// static const short stress_lengths_eo[8] = {150, 150, 230, 180, 0, 0, 300, 320};
static const short stress_lengths_eo[8] = {150, 140, 180, 180, 0, 0, 200, 200};
static const unsigned char stress_amps_eo[] = {16,14, 20,20, 20,22, 22,21 };
static const wchar_t eo_char_apostrophe[2] = {'l',0};
tr->charset_a0 = charsets[3]; // ISO-8859-3
tr->char_plus_apostrophe = eo_char_apostrophe;
// tr->langopts.word_gap = 1;
tr->langopts.vowel_pause = 2;
tr->langopts.stress_rule = STRESSPOSN_2R;
tr->langopts.stress_flags = S_FINAL_DIM_ONLY | S_FINAL_NO_2;
// tr->langopts.unstressed_wd1 = 3;
tr->langopts.unstressed_wd2 = 2;
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_OMIT_1_HUNDRED | NUM_ALLOW_SPACE | NUM_ROMAN;
tr->langopts.numbers = NUM_DECIMAL_COMMA + NUM_ALLOW_SPACE;
SetLetterVowel(tr,'y');
// tr->langopts.max_initial_consonants = 2; // BUT foreign words may have 3
tr->langopts.spelling_stress = 1;
tr->langopts.intonation_group = 3; // less intonation, don't raise pitch at comma
}
case L('h','t'): // Haitian Creole
// memcpy(tr->stress_lengths,stress_lengths_fr,sizeof(tr->stress_lengths));
tr->langopts.stress_rule = STRESSPOSN_1R; // stress on final syllable
tr->langopts.stress_flags = S_NO_AUTO_2 | S_FINAL_DIM; // don't use secondary stress
tr->langopts.numbers = NUM_SINGLE_STRESS | NUM_OMIT_1_HUNDRED | NUM_NOPAUSE | NUM_ROMAN | NUM_VIGESIMAL | NUM_DFRACTION_4;
SetLetterBits(tr,LETTERGP_C,hy_consonants2); // add 'j'
tr->langopts.max_initial_consonants = 6;
tr->langopts.numbers = NUM_DECIMAL_COMMA | NUM_ALLOW_SPACE | NUM_OMIT_1_HUNDRED;
// tr->langopts.param[LOPT_UNPRONOUNCABLE] = 1; // disable check for unpronouncable words
}
break;
SetupTranslator(tr,stress_lengths_jbo,NULL);
tr->langopts.stress_rule = STRESSPOSN_2R;
tr->langopts.vowel_pause = 0x20c; // pause before a word which starts with a vowel, or after a word which ends in a consonant
// tr->langopts.word_gap = 1;
tr->punct_within_word = jbo_punct_within_word;
tr->langopts.param[LOPT_CAPS_IN_WORD] = 2; // capitals indicate stressed syllables
SetLetterVowel(tr,'y');
tr->langopts.stress_rule = STRESSPOSN_1L;
tr->langopts.stress_flags = S_FINAL_NO_2;
tr->letter_bits_offset = OFFSET_GEORGIAN;
// tr->langopts.param[LOPT_UNPRONOUNCABLE] = 1; // disable check for unpronouncable words
tr->langopts.max_initial_consonants = 7;
tr->langopts.numbers = NUM_VIGESIMAL | NUM_AND_UNITS | NUM_OMIT_1_HUNDRED |NUM_OMIT_1_THOUSAND | NUM_DFRACTION_5 | NUM_ROMAN;
tr->langopts.stress_rule = STRESSPOSN_1L;
tr->langopts.word_gap = 0x21; // length of a final vowel is less dependent on the next consonant, don't merge consonant with next word
// tr->langopts.vowel_pause = 4;
tr->letter_groups[0] = tr->letter_groups[7] = vowels_vi;
tr->langopts.tone_language = 1; // Tone language, use CalcPitches_Tone() rather than CalcPitches()
tr->langopts.unstressed_wd1 = 2;
tr->langopts.testing = 2;
} // end of Translator_Russian
/* | |||||
typedef struct { | |||||
int flags; | |||||
unsigned char stress; // stress level of this vowel | |||||
unsigned char stress_highest; // the highest stress level of a vowel in this word | |||||
unsigned char n_vowels; // number of vowels in the word | |||||
unsigned char vowel_this; // syllable number of this vowel (counting from 1) | |||||
unsigned char vowel_stressed; // syllable number of the highest stressed vowel | |||||
} CHANGEPH; | |||||
*/ | |||||
#ifdef RUSSIAN2 | |||||
// This is now done in the phoneme data, ph_russian | |||||
int ChangePhonemes_ru(Translator *tr, PHONEME_LIST2 *phlist, int n_ph, int index, PHONEME_TAB *ph, CHANGEPH *ch) | |||||
{//============================================================================================================= | |||||
// Called for each phoneme in the phoneme list, to allow a language to make changes | |||||
// ph The current phoneme | |||||
int variant; | |||||
int vowelix; | |||||
PHONEME_TAB *prev, *next; | |||||
if(ch->flags & 8) | |||||
return(0); // full phoneme translation has already been given | |||||
// Russian vowel softening and reduction rules | |||||
if(ph->type == phVOWEL) | |||||
{ | |||||
int prestressed = ch->vowel_stressed==ch->vowel_this+1; // the next vowel after this has the main stress | |||||
#define N_VOWELS_RU 11 | |||||
static unsigned int vowels_ru[N_VOWELS_RU] = {'a','V','O','I',PH('I','#'),PH('E','#'),PH('E','2'), | |||||
PH('V','#'),PH('I','3'),PH('I','2'),PH('E','3')}; | |||||
static unsigned int vowel_replace[N_VOWELS_RU][6] = { | |||||
// stressed, soft, soft-stressed, j+stressed, j+soft, j+soft-stressed | |||||
/*0*/ {'A', 'I', PH('j','a'), 'a', 'a', 'a'}, // a Uses 3,4,5 columns. | |||||
/*1*/ {'A', 'V', PH('j','a'), 'a', 'V', 'a'}, // V Uses 3,4,5 columns. | |||||
/*2*/ {'o', '8', '8', 'o', '8', '8'}, // O | |||||
/*3*/ {'i', 'I', 'i', 'a', 'I', 'a'}, // I Uses 3,4,5 columns. | |||||
/*4*/ {'i', PH('I','#'), 'i', 'i', PH('I','#'), 'i'}, // I# | |||||
/*5*/ {'E', PH('E','#'), 'E', 'e', PH('E','#'), 'e'}, // E# | |||||
/*6*/ {'E', PH('E','2'), 'E', 'e', PH('E','2'), 'e'}, // E2 Uses 3,4,5 columns. | |||||
/*7*/ {PH('j','a'), 'V', PH('j','a'), 'A', 'V', 'A'}, // V# | |||||
/*8*/ {PH('j','a'), 'I', PH('j','a'), 'e', 'I', 'e'}, // I3 Uses 3,4,5 columns. | |||||
/*9*/ {'e', 'I', 'e', 'e', 'I', 'e'}, // I2 | |||||
/*10*/ {'e', PH('E', '2'), 'e', 'e', PH('E','2'), 'e'} // E3 | |||||
}; | |||||
prev = phoneme_tab[phlist[index-1].phcode]; | |||||
next = phoneme_tab[phlist[index+1].phcode]; | |||||
// lookup the vowel name to get an index into the vowel_replace[] table | |||||
for(vowelix=0; vowelix<N_VOWELS_RU; vowelix++) | |||||
{ | |||||
if(vowels_ru[vowelix] == ph->mnemonic) | |||||
break; | |||||
} | |||||
if(vowelix == N_VOWELS_RU) | |||||
return(0); | |||||
if(prestressed) | |||||
{ | |||||
if((vowelix==6)&&(prev->mnemonic=='j')) | |||||
vowelix=8; | |||||
if(vowelix==1) | |||||
vowelix=0; | |||||
if(vowelix==4) | |||||
vowelix=3; | |||||
if(vowelix==6) | |||||
vowelix=5; | |||||
if(vowelix==7) | |||||
vowelix=8; | |||||
if(vowelix==10) | |||||
vowelix=9; | |||||
} | |||||
// do we need a variant of this vowel, depending on the stress and adjacent phonemes ? | |||||
variant = -1; | |||||
int stressed = ch->flags & 2; | |||||
int soft=prev->phflags & phPALATAL; | |||||
if (soft && stressed) | |||||
variant = 2; | |||||
else if (stressed) | |||||
variant = 0; | |||||
else if (soft) | |||||
variant = 1; | |||||
if(variant >= 0) | |||||
{ | |||||
if(prev->mnemonic == 'j') | |||||
variant += 3; | |||||
phlist[index].phcode = PhonemeCode(vowel_replace[vowelix][variant]); | |||||
} | |||||
else | |||||
{ | |||||
phlist[index].phcode = PhonemeCode(vowels_ru[vowelix]); | |||||
} | |||||
} | |||||
return(0); | |||||
} | |||||
#endif | |||||
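// Illustrative sketch, not part of the original source: the variant selection
// used by the removed ChangePhonemes_ru() above, written as a stand-alone helper.
// Column indices follow the comment on vowel_replace[]: 0=stressed, 1=soft,
// 2=soft+stressed, and +3 when the preceding phoneme is 'j'.
static int select_vowel_variant(int stressed, int soft, int after_j)
{
	int variant = -1;          // -1: keep the base (unreduced) vowel
	if(soft && stressed)
		variant = 2;
	else if(stressed)
		variant = 0;
	else if(soft)
		variant = 1;
	if((variant >= 0) && after_j)
		variant += 3;          // use the j+... columns
	return variant;
}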
// other characters which break a word, but don't produce a pause | // other characters which break a word, but don't produce a pause | ||||
static const unsigned short breaks[] = {'_', 0}; | static const unsigned short breaks[] = {'_', 0}; | ||||
// treat these characters as spaces, in addition to iswspace() | |||||
// static const wchar_t chars_space[] = {0x2500,0x2501,0}; // box drawing horiz | |||||
// Translate character codes 0xA0 to 0xFF into their unicode values | // Translate character codes 0xA0 to 0xFF into their unicode values | ||||
// ISO_8859_1 is set as default | // ISO_8859_1 is set as default | ||||
static const unsigned short ISO_8859_1[0x60] = { | static const unsigned short ISO_8859_1[0x60] = { | ||||
{ | { | ||||
if(word_length > 1) | if(word_length > 1) | ||||
return(FLAG_SPELLWORD); // a mixture of languages, retranslate as individual letters, separated by spaces | return(FLAG_SPELLWORD); // a mixture of languages, retranslate as individual letters, separated by spaces | ||||
if(phonemes[0] == phonSWITCH) | |||||
{ | |||||
// problem with espeak -vbg "b.c.d.e.f" | |||||
} | |||||
return(0); | return(0); | ||||
} | } | ||||
strcpy(word_phonemes, phonemes); | strcpy(word_phonemes, phonemes); | ||||
c = ' '; // lower case followed by upper case, treat as new word | c = ' '; // lower case followed by upper case, treat as new word | ||||
space_inserted = 1; | space_inserted = 1; | ||||
prev_in_save = c; | prev_in_save = c; | ||||
// next_word_flags |= FLAG_NOSPACE; // problem: prevents FLAG_HAS_DOT being set | |||||
} | } | ||||
} | } | ||||
else if((c != ' ') && iswupper2(prev_in) && iswlower2(next_in)) | else if((c != ' ') && iswupper2(prev_in) && iswlower2(next_in)) |
}; | }; | ||||
int tone_points[12] = {600,170, 1200,135, 2000,110, 3000,110, -1,0}; | int tone_points[12] = {600,170, 1200,135, 2000,110, 3000,110, -1,0}; | ||||
//int tone_points[12] = {250,200, 400,170, 600,170, 1200,135, 2000,110, -1,0}; | |||||
// limit the rate of change for each formant number | // limit the rate of change for each formant number | ||||
//static int formant_rate_22050[9] = {50, 104, 165, 230, 220, 220, 220, 220, 220}; // values for 22kHz sample rate | |||||
//static int formant_rate_22050[9] = {240, 180, 180, 180, 180, 180, 180, 180, 180}; // values for 22kHz sample rate | |||||
static int formant_rate_22050[9] = {240, 170, 170, 170, 170, 170, 170, 170, 170}; // values for 22kHz sample rate | static int formant_rate_22050[9] = {240, 170, 170, 170, 170, 170, 170, 170, 170}; // values for 22kHz sample rate | ||||
int formant_rate[9]; // values adjusted for actual sample rate | int formant_rate[9]; // values adjusted for actual sample rate | ||||
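// Illustrative sketch (assumption): the comment above says formant_rate[] holds
// the 22050 Hz reference values adjusted for the actual sample rate; a plausible
// adjustment simply rescales each entry by 22050/samplerate.
static void AdjustFormantRateSketch(int samplerate)
{
	int ix;
	for(ix = 0; ix < 9; ix++)
		formant_rate[ix] = (formant_rate_22050[ix] * 22050) / samplerate;
}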
// these just set a value in langopts.param[] | // these just set a value in langopts.param[] | ||||
{"l_dieresis", 0x100+LOPT_DIERESES}, | {"l_dieresis", 0x100+LOPT_DIERESES}, | ||||
// {"l_lengthen", 0x100+LOPT_IT_LENGTHEN}, | |||||
{"l_prefix", 0x100+LOPT_PREFIXES}, | {"l_prefix", 0x100+LOPT_PREFIXES}, | ||||
{"l_regressive_v", 0x100+LOPT_REGRESSIVE_VOICING}, | {"l_regressive_v", 0x100+LOPT_REGRESSIVE_VOICING}, | ||||
{"l_unpronouncable", 0x100+LOPT_UNPRONOUNCABLE}, | {"l_unpronouncable", 0x100+LOPT_UNPRONOUNCABLE}, | ||||
int pk; | int pk; | ||||
static unsigned char default_heights[N_PEAKS] = {130,128,120,116,100,100,128,128,128}; // changed for v.1.47 | static unsigned char default_heights[N_PEAKS] = {130,128,120,116,100,100,128,128,128}; // changed for v.1.47 | ||||
static unsigned char default_widths[N_PEAKS] = {140,128,128,160,171,171,128,128,128}; | static unsigned char default_widths[N_PEAKS] = {140,128,128,160,171,171,128,128,128}; | ||||
// static unsigned char default_heights[N_PEAKS] = {128,128,120,120,110,110,128,128,128}; // previous version | |||||
// static unsigned char default_widths[N_PEAKS] = {128,128,128,160,171,171,128,128,128}; | |||||
static int breath_widths[N_PEAKS] = {0,200,200,400,400,400,600,600,600}; | static int breath_widths[N_PEAKS] = {0,200,200,400,400,400,600,600,600}; | ||||
voice->pitch_base = 0x47000; | voice->pitch_base = 0x47000; | ||||
voice->pitch_range = 4104; | voice->pitch_range = 4104; | ||||
// default is: pitch 80,117 | |||||
// voice->pitch_base = 0x47000; | |||||
// voice->pitch_range = 3996; | |||||
voice->formant_factor = 256; | voice->formant_factor = 256; | ||||
voice->speed_percent = 100; | voice->speed_percent = 100; | ||||
// This table provides the opportunity for tone control. | // This table provides the opportunity for tone control. | ||||
// Adjustment of harmonic amplitudes, steps of 8Hz | // Adjustment of harmonic amplitudes, steps of 8Hz | ||||
// value of 128 means no change | // value of 128 means no change | ||||
// memset(voice->tone_adjust,128,sizeof(voice->tone_adjust)); | |||||
SetToneAdjust(voice,tone_points); | SetToneAdjust(voice,tone_points); | ||||
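// Illustrative sketch (assumption): tone_points[] holds (frequency, setting)
// pairs terminated by a negative frequency, and a routine like SetToneAdjust()
// could fill a tone_adjust[] table (one entry per 8 Hz, 128 = no change) by
// linear interpolation between those points.  Function name is hypothetical.
static void ToneAdjustSketch(unsigned char *tone_adjust, int n_entries, const int *points)
{
	int ix;
	for(ix = 0; ix < n_entries; ix++)
	{
		int freq = ix * 8;         // frequency covered by this entry
		int p = 0;
		int value = 128;           // default: no change
		while((points[p] >= 0) && (points[p+2] >= 0) && (freq > points[p+2]))
			p += 2;                // advance to the segment containing freq
		if(points[p] >= 0)
		{
			if((points[p+2] < 0) || (freq <= points[p]))
				value = points[p+1];   // clamp outside the given range
			else
				value = points[p+1] + (points[p+3] - points[p+1]) *
					(freq - points[p]) / (points[p+2] - points[p]);
		}
		tone_adjust[ix] = (unsigned char)value;
	}
}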
// default values of speed factors | // default values of speed factors |
#include "wave.h" | #include "wave.h" | ||||
#include "debug.h" | #include "debug.h" | ||||
//<Definitions | |||||
#ifdef NEED_STRUCT_TIMESPEC | #ifdef NEED_STRUCT_TIMESPEC | ||||
#define HAVE_STRUCT_TIMESPEC 1 | #define HAVE_STRUCT_TIMESPEC 1 | ||||
struct timespec { | struct timespec { | ||||
#define MAX_SAMPLE_RATE 22050 | #define MAX_SAMPLE_RATE 22050 | ||||
#define FRAMES_PER_BUFFER 512 | #define FRAMES_PER_BUFFER 512 | ||||
#define BUFFER_LENGTH (MAX_SAMPLE_RATE*2*sizeof(uint16_t)) | #define BUFFER_LENGTH (MAX_SAMPLE_RATE*2*sizeof(uint16_t)) | ||||
//#define THRESHOLD (BUFFER_LENGTH/5) | |||||
static char myBuffer[BUFFER_LENGTH]; | static char myBuffer[BUFFER_LENGTH]; | ||||
static char* myRead=NULL; | static char* myRead=NULL; | ||||
static char* myWrite=NULL; | static char* myWrite=NULL; | ||||
static uint32_t myReadPosition = 0; // in ms | static uint32_t myReadPosition = 0; // in ms | ||||
static uint32_t myWritePosition = 0; | static uint32_t myWritePosition = 0; | ||||
//> | |||||
//<init_buffer, get_used_mem | |||||
static void init_buffer() | static void init_buffer() | ||||
{ | { | ||||
myWrite = myBuffer; | myWrite = myBuffer; | ||||
return used; | return used; | ||||
} | } | ||||
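// Illustrative sketch (assumption): myBuffer acts as a circular buffer between
// wave_write() (producer, advances myWrite) and the callback (consumer, advances
// myRead); the number of buffered bytes follows from the two pointers:
static size_t used_bytes_sketch(const char *read_p, const char *write_p)
{
	if(write_p >= read_p)
		return (size_t)(write_p - read_p);              // no wrap-around
	return BUFFER_LENGTH - (size_t)(read_p - write_p);  // wrapped past the end
}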
//> | |||||
//<start stream | |||||
static void start_stream() | static void start_stream() | ||||
{ | { | ||||
PaError err; | PaError err; | ||||
#endif | #endif | ||||
} | } | ||||
//> | |||||
//<pa_callback | |||||
/* This routine will be called by the PortAudio engine when audio is needed. | /* This routine will be called by the PortAudio engine when audio is needed. | ||||
** It may be called at interrupt level on some machines so don't do anything | ** It may be called at interrupt level on some machines so don't do anything | ||||
** that could mess up the system like calling malloc() or free(). | ** that could mess up the system like calling malloc() or free(). | ||||
} | } | ||||
char* p = (char*)outputBuffer + aUsedMem; | char* p = (char*)outputBuffer + aUsedMem; | ||||
memset(p, 0, n - aUsedMem); | memset(p, 0, n - aUsedMem); | ||||
// myReadPosition += aUsedMem/(out_channels*sizeof(uint16_t)); | |||||
myRead = aWrite; | myRead = aWrite; | ||||
} | } | ||||
} | } | ||||
size_t aUsedMem = aTopMem + aRest; | size_t aUsedMem = aTopMem + aRest; | ||||
char* p = (char*)outputBuffer + aUsedMem; | char* p = (char*)outputBuffer + aUsedMem; | ||||
memset(p, 0, n - aUsedMem); | memset(p, 0, n - aUsedMem); | ||||
// myReadPosition += aUsedMem/(out_channels*sizeof(uint16_t)); | |||||
myRead = aWrite; | myRead = aWrite; | ||||
} | } | ||||
} | } | ||||
SHOW("pa_callback > myRead=%x\n",(int)myRead); | SHOW("pa_callback > myRead=%x\n",(int)myRead); | ||||
// #if USE_PORTAUDIO == 18 | |||||
// if(aBufferEmpty) | |||||
// { | |||||
// static int end_timer = 0; | |||||
// if(end_timer == 0) | |||||
// end_timer = 4; | |||||
// if(end_timer > 0) | |||||
// { | |||||
// end_timer--; | |||||
// if(end_timer == 0) | |||||
// return(1); | |||||
// } | |||||
// } | |||||
// return(0); | |||||
// #else | |||||
#ifdef ARCH_BIG | #ifdef ARCH_BIG | ||||
{ | { | ||||
// BIG-ENDIAN, swap the order of bytes in each sound sample in the portaudio buffer | // BIG-ENDIAN, swap the order of bytes in each sound sample in the portaudio buffer | ||||
} | } | ||||
#endif | #endif | ||||
return(aResult); | return(aResult); | ||||
//#endif | |||||
} // end of WaveCallBack | } // end of WaveCallBack | ||||
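// Illustrative sketch (assumption, PortAudio V19 callback signature): a minimal
// callback of this kind copies whatever is buffered and zero-fills the rest,
// avoiding malloc()/free() as the comment above warns.  Ring-buffer wrap-around
// is ignored here for brevity; used_bytes_sketch() is the helper sketched above.
static int pa_callback_sketch(const void *input, void *output,
		unsigned long frameCount,
		const PaStreamCallbackTimeInfo *timeInfo,
		PaStreamCallbackFlags statusFlags, void *userData)
{
	size_t wanted = frameCount * out_channels * sizeof(uint16_t);
	size_t avail = used_bytes_sketch(myRead, myWrite);
	size_t n = (avail < wanted) ? avail : wanted;
	memcpy(output, myRead, n);                 // play what is available
	memset((char*)output + n, 0, wanted - n);  // pad with silence if we ran dry
	myRead += n;
	return paContinue;                         // keep the stream running
}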
//> | |||||
void wave_flush(void* theHandler) | void wave_flush(void* theHandler) | ||||
{ | { | ||||
if (my_stream_could_start) | if (my_stream_could_start) | ||||
{ | { | ||||
// #define buf 1024 | |||||
// static char a_buffer[buf*2]; | |||||
// memset(a_buffer,0,buf*2); | |||||
// wave_write(theHandler, a_buffer, buf*2); | |||||
start_stream(); | start_stream(); | ||||
} | } | ||||
} | } | ||||
//<wave_open_sound | |||||
static int wave_open_sound() | static int wave_open_sound() | ||||
{ | { | ||||
ENTER("wave_open_sound"); | ENTER("wave_open_sound"); | ||||
out_channels = 1; | out_channels = 1; | ||||
#if USE_PORTAUDIO == 18 | #if USE_PORTAUDIO == 18 | ||||
// err = Pa_OpenDefaultStream(&pa_stream,0,1,paInt16,wave_samplerate,FRAMES_PER_BUFFER,N_WAV_BUF,pa_callback,(void *)userdata); | |||||
PaDeviceID playbackDevice = Pa_GetDefaultOutputDeviceID(); | PaDeviceID playbackDevice = Pa_GetDefaultOutputDeviceID(); | ||||
NULL, | NULL, | ||||
/* general parameters */ | /* general parameters */ | ||||
wave_samplerate, FRAMES_PER_BUFFER, 0, | wave_samplerate, FRAMES_PER_BUFFER, 0, | ||||
//paClipOff | paDitherOff, | |||||
paNoFlag, | paNoFlag, | ||||
pa_callback, (void *)userdata); | pa_callback, (void *)userdata); | ||||
SHOW_TIME("wave_open_sound > try stereo"); | SHOW_TIME("wave_open_sound > try stereo"); | ||||
// failed to open with mono, try stereo | // failed to open with mono, try stereo | ||||
out_channels = 2; | out_channels = 2; | ||||
// myOutputParameters.channelCount = out_channels; | |||||
PaError err = Pa_OpenStream( &pa_stream, | PaError err = Pa_OpenStream( &pa_stream, | ||||
/* capture parameters */ | /* capture parameters */ | ||||
paNoDevice, | paNoDevice, | ||||
NULL, | NULL, | ||||
/* general parameters */ | /* general parameters */ | ||||
wave_samplerate, FRAMES_PER_BUFFER, 0, | wave_samplerate, FRAMES_PER_BUFFER, 0, | ||||
//paClipOff | paDitherOff, | |||||
paNoFlag, | paNoFlag, | ||||
pa_callback, (void *)userdata); | pa_callback, (void *)userdata); | ||||
// err = Pa_OpenDefaultStream(&pa_stream,0,2,paInt16, | |||||
// wave_samplerate, | |||||
// FRAMES_PER_BUFFER, | |||||
// N_WAV_BUF,pa_callback,(void *)userdata); | |||||
SHOW("wave_open_sound > Pa_OpenDefaultStream(2): err=%d (%s)\n",err, Pa_GetErrorText(err)); | SHOW("wave_open_sound > Pa_OpenDefaultStream(2): err=%d (%s)\n",err, Pa_GetErrorText(err)); | ||||
err=0; // avoid warning | err=0; // avoid warning | ||||
} | } | ||||
wave_samplerate, | wave_samplerate, | ||||
framesPerBuffer, | framesPerBuffer, | ||||
paNoFlag, | paNoFlag, | ||||
// paClipOff | paDitherOff, | |||||
pa_callback, | pa_callback, | ||||
(void *)userdata); | (void *)userdata); | ||||
if ((err!=paNoError) | if ((err!=paNoError) | ||||
// paClipOff | paDitherOff, | // paClipOff | paDitherOff, | ||||
pa_callback, | pa_callback, | ||||
(void *)userdata); | (void *)userdata); | ||||
// err = Pa_OpenDefaultStream(&pa_stream,0,2,paInt16,(double)wave_samplerate,FRAMES_PER_BUFFER,pa_callback,(void *)userdata); | |||||
} | } | ||||
mInCallbackFinishedState = false; | mInCallbackFinishedState = false; | ||||
#endif | #endif | ||||
return (err != paNoError); | return (err != paNoError); | ||||
} | } | ||||
//> | |||||
//<select_device | |||||
#if (USE_PORTAUDIO == 19) | #if (USE_PORTAUDIO == 19) | ||||
static void update_output_parameters(int selectedDevice, const PaDeviceInfo *deviceInfo) | static void update_output_parameters(int selectedDevice, const PaDeviceInfo *deviceInfo) | ||||
{ | { | ||||
// const PaDeviceInfo *pdi = Pa_GetDeviceInfo(i); | |||||
myOutputParameters.device = selectedDevice; | myOutputParameters.device = selectedDevice; | ||||
// myOutputParameters.channelCount = pdi->maxOutputChannels; | |||||
myOutputParameters.channelCount = 1; | myOutputParameters.channelCount = 1; | ||||
myOutputParameters.sampleFormat = paInt16; | myOutputParameters.sampleFormat = paInt16; | ||||
if (deviceInfo) | if (deviceInfo) | ||||
{ | { | ||||
double aLatency = deviceInfo->defaultLowOutputLatency; | double aLatency = deviceInfo->defaultLowOutputLatency; | ||||
// double aCoeff = round(0.100 / aLatency); | |||||
// myOutputParameters.suggestedLatency = aCoeff * aLatency; // to avoid glitches ? | |||||
myOutputParameters.suggestedLatency = aLatency; // for faster response ? | myOutputParameters.suggestedLatency = aLatency; // for faster response ? | ||||
SHOW("Device=%d, myOutputParameters.suggestedLatency=%f, aCoeff=%f\n", | SHOW("Device=%d, myOutputParameters.suggestedLatency=%f, aCoeff=%f\n", | ||||
selectedDevice, | selectedDevice, | ||||
selectedDevice, | selectedDevice, | ||||
myOutputParameters.suggestedLatency); | myOutputParameters.suggestedLatency); | ||||
} | } | ||||
//pdi->defaultLowOutputLatency; | |||||
myOutputParameters.hostApiSpecificStreamInfo = NULL; | myOutputParameters.hostApiSpecificStreamInfo = NULL; | ||||
} | } | ||||
#endif | #endif | ||||
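// Illustrative sketch (assumption): with PortAudio V19 the parameters prepared by
// update_output_parameters() would typically be handed to Pa_OpenStream() with no
// capture side, roughly as below (wrapper name is hypothetical):
static PaError open_stream_sketch(void *userdata)
{
	return Pa_OpenStream(&pa_stream,
			NULL,                   // no input (capture) parameters
			&myOutputParameters,    // device, channel count, format, latency
			wave_samplerate,
			FRAMES_PER_BUFFER,
			paNoFlag,
			pa_callback,
			userdata);
}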
} | } | ||||
//> | |||||
// int wave_Close(void* theHandler) | |||||
// { | |||||
// SHOW_TIME("WaveCloseSound"); | |||||
// // PaError active; | |||||
// // check whether speaking has finished, and close the stream | |||||
// if(pa_stream != NULL) | |||||
// { | |||||
// Pa_CloseStream(pa_stream); | |||||
// pa_stream = NULL; | |||||
// init_buffer(); | |||||
// // #if USE_PORTAUDIO == 18 | |||||
// // active = Pa_StreamActive(pa_stream); | |||||
// // #else | |||||
// // active = Pa_IsStreamActive(pa_stream); | |||||
// // #endif | |||||
// // if(active == 0) | |||||
// // { | |||||
// // SHOW_TIME("WaveCloseSound > ok, not active"); | |||||
// // Pa_CloseStream(pa_stream); | |||||
// // pa_stream = NULL; | |||||
// // return(1); | |||||
// // } | |||||
// } | |||||
// return(0); | |||||
// } | |||||
//<wave_set_callback_is_output_enabled | |||||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | void wave_set_callback_is_output_enabled(t_wave_callback* cb) | ||||
{ | { | ||||
my_callback_is_output_enabled = cb; | my_callback_is_output_enabled = cb; | ||||
} | } | ||||
//> | |||||
//<wave_init | |||||
// TBD: the arg could be "alsa", "oss",... | |||||
int wave_init(int srate) | int wave_init(int srate) | ||||
{ | { | ||||
ENTER("wave_init"); | ENTER("wave_init"); | ||||
return err == paNoError; | return err == paNoError; | ||||
} | } | ||||
//> | |||||
//<wave_open | |||||
void* wave_open(const char* the_api) | void* wave_open(const char* the_api) | ||||
{ | { | ||||
ENTER("wave_open"); | ENTER("wave_open"); | ||||
static int once=0; | static int once=0; | ||||
// TBD: the_api (e.g. "alsa") is not used at the moment | |||||
// select_device is called once | |||||
if (!once) | if (!once) | ||||
{ | { | ||||
select_device("alsa"); | select_device("alsa"); | ||||
return((void*)1); | return((void*)1); | ||||
} | } | ||||
//> | |||||
//<copyBuffer | |||||
static size_t copyBuffer(char* dest, char* src, const size_t theSizeInBytes) | static size_t copyBuffer(char* dest, char* src, const size_t theSizeInBytes) | ||||
{ | { | ||||
size_t bytes_written = 0; | size_t bytes_written = 0; | ||||
return bytes_written; | return bytes_written; | ||||
} | } | ||||
//> | |||||
//<wave_write | |||||
size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | ||||
{ | { | ||||
ENTER("wave_write"); | ENTER("wave_write"); | ||||
break; | break; | ||||
} // end if (aTotalFreeMem >= bytes_to_write) | } // end if (aTotalFreeMem >= bytes_to_write) | ||||
//SHOW_TIME("wave_write > wait"); | |||||
SHOW("wave_write > wait: aTotalFreeMem=%d\n", aTotalFreeMem); | SHOW("wave_write > wait: aTotalFreeMem=%d\n", aTotalFreeMem); | ||||
SHOW("wave_write > aRead=%x, myWrite=%x\n", (int)aRead, (int)myWrite); | SHOW("wave_write > aRead=%x, myWrite=%x\n", (int)aRead, (int)myWrite); | ||||
usleep(10000); | usleep(10000); | ||||
return bytes_written; | return bytes_written; | ||||
} | } | ||||
//> | |||||
//<wave_close | |||||
int wave_close(void* theHandler) | int wave_close(void* theHandler) | ||||
{ | { | ||||
SHOW_TIME("wave_close > ENTER"); | SHOW_TIME("wave_close > ENTER"); | ||||
return 0; | return 0; | ||||
} | } | ||||
// int wave_close(void* theHandler) | |||||
// { | |||||
// ENTER("wave_close"); | |||||
// if(pa_stream != NULL) | |||||
// { | |||||
// PaError err = Pa_AbortStream(pa_stream); | |||||
// SHOW_TIME("wave_close > Pa_AbortStream (end)"); | |||||
// SHOW("wave_close Pa_AbortStream > err=%d\n",err); | |||||
// while(1) | |||||
// { | |||||
// PaError active; | |||||
// #if USE_PORTAUDIO == 18 | |||||
// active = Pa_StreamActive(pa_stream); | |||||
// #else | |||||
// active = Pa_IsStreamActive(pa_stream); | |||||
// #endif | |||||
// if (active != 1) | |||||
// { | |||||
// break; | |||||
// } | |||||
// SHOW("wave_close > active=%d\n",err); | |||||
// usleep(10000); /* sleep until playback has finished */ | |||||
// } | |||||
// err = Pa_CloseStream( pa_stream ); | |||||
// SHOW_TIME("wave_close > Pa_CloseStream (end)"); | |||||
// SHOW("wave_close Pa_CloseStream > err=%d\n",err); | |||||
// pa_stream = NULL; | |||||
// init_buffer(); | |||||
// } | |||||
// return 0; | |||||
// } | |||||
//> | |||||
//<wave_is_busy | |||||
int wave_is_busy(void* theHandler) | int wave_is_busy(void* theHandler) | ||||
{ | { | ||||
PaError active=0; | PaError active=0; | ||||
return (active==1); | return (active==1); | ||||
} | } | ||||
//> | |||||
//<wave_terminate | |||||
void wave_terminate() | void wave_terminate() | ||||
{ | { | ||||
ENTER("wave_terminate"); | ENTER("wave_terminate"); | ||||
} | } | ||||
//> | |||||
//<wave_get_read_position, wave_get_write_position, wave_get_remaining_time | |||||
uint32_t wave_get_read_position(void* theHandler) | uint32_t wave_get_read_position(void* theHandler) | ||||
{ | { | ||||
SHOW("wave_get_read_position > myReadPosition=%u\n", myReadPosition); | SHOW("wave_get_read_position > myReadPosition=%u\n", myReadPosition); | ||||
return 0; | return 0; | ||||
} | } | ||||
//> | |||||
//<wave_test_get_write_buffer | |||||
void *wave_test_get_write_buffer() | void *wave_test_get_write_buffer() | ||||
{ | { | ||||
return myWrite; | return myWrite; | ||||
#else | #else | ||||
// notdef USE_PORTAUDIO | |||||
int wave_init(int srate) {return 1;} | int wave_init(int srate) {return 1;} | ||||
#endif // of USE_PORTAUDIO | #endif // of USE_PORTAUDIO | ||||
//> | |||||
//<clock_gettime2, add_time_in_ms | |||||
void clock_gettime2(struct timespec *ts) | void clock_gettime2(struct timespec *ts) | ||||
{ | { | ||||
struct timeval tv; | struct timeval tv; | ||||
#endif // USE_ASYNC | #endif // USE_ASYNC | ||||
//> |
extern int option_device_number; | extern int option_device_number; | ||||
extern int wave_init(int samplerate); | extern int wave_init(int samplerate); | ||||
// TBD: the arg could be "alsa", "oss",... | |||||
extern void* wave_open(const char* the_api); | extern void* wave_open(const char* the_api); | ||||
extern size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize); | extern size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize); |
#include "wave.h" | #include "wave.h" | ||||
#include "debug.h" | #include "debug.h" | ||||
//<Definitions | |||||
enum {ONE_BILLION=1000000000}; | enum {ONE_BILLION=1000000000}; | ||||
enum { | enum { | ||||
// /* 100ms. | |||||
// If a greater value is set (several seconds), | |||||
// please update _pulse_timeout_start accordingly */ | |||||
// PULSE_TIMEOUT_IN_USEC = 100000, | |||||
/* return value */ | /* return value */ | ||||
PULSE_OK = 0, | PULSE_OK = 0, | ||||
PULSE_ERROR = -1, | PULSE_ERROR = -1, | ||||
if (!connected){ SHOW("CHECK_CONNECTED_NO_RETVAL: !pulse_connected\n", ""); return; } \ | if (!connected){ SHOW("CHECK_CONNECTED_NO_RETVAL: !pulse_connected\n", ""); return; } \ | ||||
} while (0); | } while (0); | ||||
//> | |||||
// static void display_timing_info(const pa_timing_info* the_time) | |||||
// { | |||||
// const struct timeval *tv=&(the_time->timestamp); | |||||
// SHOW_TIME("ti>"); | |||||
// SHOW("ti> timestamp=%03d.%03dms\n",(int)(tv->tv_sec%1000), (int)(tv->tv_usec/1000)); | |||||
// SHOW("ti> synchronized_clocks=%d\n",the_time->synchronized_clocks); | |||||
// SHOW("ti> sink_usec=%ld\n",the_time->sink_usec); | |||||
// SHOW("ti> source_usec=%ld\n",the_time->source_usec); | |||||
// SHOW("ti> transport=%ld\n",the_time->transport_usec); | |||||
// SHOW("ti> playing=%d\n",the_time->playing); | |||||
// SHOW("ti> write_index_corrupt=%d\n",the_time->write_index_corrupt); | |||||
// SHOW("ti> write_index=0x%lx\n",the_time->write_index); | |||||
// SHOW("ti> read_index_corrupt=%d\n",the_time->read_index_corrupt); | |||||
// SHOW("ti> read_index=0x%lx\n",the_time->read_index); | |||||
// } | |||||
static void subscribe_cb(struct pa_context *c, enum pa_subscription_event_type t, uint32_t index, void *userdata) { | static void subscribe_cb(struct pa_context *c, enum pa_subscription_event_type t, uint32_t index, void *userdata) { | ||||
ENTER(__FUNCTION__); | ENTER(__FUNCTION__); | ||||
} | } | ||||
static void stream_latency_update_cb(pa_stream *s, void *userdata) { | static void stream_latency_update_cb(pa_stream *s, void *userdata) { | ||||
// ENTER(__FUNCTION__); | |||||
assert(s); | assert(s); | ||||
pa_threaded_mainloop_signal(mainloop, 0); | pa_threaded_mainloop_signal(mainloop, 0); | ||||
r = i->playing; | r = i->playing; | ||||
memcpy((void*)the_timing_info, (void*)i, sizeof(pa_timing_info)); | memcpy((void*)the_timing_info, (void*)i, sizeof(pa_timing_info)); | ||||
// display_timing_info(i); | |||||
fail: | fail: | ||||
pa_threaded_mainloop_unlock(mainloop); | pa_threaded_mainloop_unlock(mainloop); | ||||
return r; | return r; | ||||
} | } | ||||
// static void pulse_flush(int time) { | |||||
// ENTER(__FUNCTION__); | |||||
// pa_operation *o = NULL; | |||||
// int success = 0; | |||||
// CHECK_CONNECTED(); | |||||
// pa_threaded_mainloop_lock(mainloop); | |||||
// CHECK_DEAD_GOTO(fail, 1); | |||||
// if (!(o = pa_stream_flush(stream, stream_success_cb, &success))) { | |||||
// SHOW("pa_stream_flush() failed: %s", pa_strerror(pa_context_errno(context))); | |||||
// goto fail; | |||||
// } | |||||
// while (pa_operation_get_state(o) != PA_OPERATION_DONE) { | |||||
// CHECK_DEAD_GOTO(fail, 1); | |||||
// pa_threaded_mainloop_wait(mainloop); | |||||
// } | |||||
// if (!success) | |||||
// SHOW("pa_stream_flush() failed: %s", pa_strerror(pa_context_errno(context))); | |||||
// written = (uint64_t) (((double) time * pa_bytes_per_second(pa_stream_get_sample_spec(stream))) / 1000); | |||||
// just_flushed = 1; | |||||
// time_offset_msec = time; | |||||
// fail: | |||||
// if (o) | |||||
// pa_operation_unref(o); | |||||
// pa_threaded_mainloop_unlock(mainloop); | |||||
// } | |||||
static void pulse_write(void* ptr, int length) { | static void pulse_write(void* ptr, int length) { | ||||
ENTER(__FUNCTION__); | ENTER(__FUNCTION__); | ||||
fail: | fail: | ||||
// pulse_close(); | |||||
if (ret == PULSE_NO_CONNECTION) { | if (ret == PULSE_NO_CONNECTION) { | ||||
if (context) { | if (context) { | ||||
SHOW_TIME("pa_context_disconnect (call)"); | SHOW_TIME("pa_context_disconnect (call)"); | ||||
void wave_flush(void* theHandler) | void wave_flush(void* theHandler) | ||||
{ | { | ||||
ENTER("wave_flush"); | ENTER("wave_flush"); | ||||
// if (my_stream_could_start) | |||||
// { | |||||
// // #define buf 1024 | |||||
// // static char a_buffer[buf*2]; | |||||
// // memset(a_buffer,0,buf*2); | |||||
// // wave_write(theHandler, a_buffer, buf*2); | |||||
// start_stream(); | |||||
// } | |||||
} | } | ||||
//<wave_set_callback_is_output_enabled | |||||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | void wave_set_callback_is_output_enabled(t_wave_callback* cb) | ||||
{ | { | ||||
my_callback_is_output_enabled = cb; | my_callback_is_output_enabled = cb; | ||||
} | } | ||||
//> | |||||
//<wave_init | |||||
int wave_init(int srate) | int wave_init(int srate) | ||||
{ | { | ||||
ENTER("wave_init"); | ENTER("wave_init"); | ||||
return pulse_open() == PULSE_OK; | return pulse_open() == PULSE_OK; | ||||
} | } | ||||
//> | |||||
//<wave_open | |||||
void* wave_open(const char* the_api) | void* wave_open(const char* the_api) | ||||
{ | { | ||||
ENTER("wave_open"); | ENTER("wave_open"); | ||||
return((void*)1); | return((void*)1); | ||||
} | } | ||||
//> | |||||
//<wave_write | |||||
size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | size_t wave_write(void* theHandler, char* theMono16BitsWaveBuffer, size_t theSize) | ||||
{ | { | ||||
ENTER("wave_write"); | ENTER("wave_write"); | ||||
return theSize; | return theSize; | ||||
} | } | ||||
//> | |||||
//<wave_close | |||||
int wave_close(void* theHandler) | int wave_close(void* theHandler) | ||||
{ | { | ||||
SHOW_TIME("wave_close > ENTER"); | SHOW_TIME("wave_close > ENTER"); | ||||
return PULSE_OK; | return PULSE_OK; | ||||
} | } | ||||
//> | |||||
//<wave_is_busy | |||||
int wave_is_busy(void* theHandler) | int wave_is_busy(void* theHandler) | ||||
{ | { | ||||
SHOW_TIME("wave_is_busy"); | SHOW_TIME("wave_is_busy"); | ||||
return active; | return active; | ||||
} | } | ||||
//> | |||||
//<wave_terminate | |||||
void wave_terminate() | void wave_terminate() | ||||
{ | { | ||||
ENTER("wave_terminate"); | ENTER("wave_terminate"); | ||||
// Pa_Terminate(); | |||||
int a_status; | int a_status; | ||||
pthread_mutex_t* a_mutex = NULL; | pthread_mutex_t* a_mutex = NULL; | ||||
a_mutex = &pulse_mutex; | a_mutex = &pulse_mutex; | ||||
pthread_mutex_destroy(a_mutex); | pthread_mutex_destroy(a_mutex); | ||||
} | } | ||||
//> | |||||
//<wave_get_read_position, wave_get_write_position, wave_get_remaining_time | |||||
uint32_t wave_get_read_position(void* theHandler) | uint32_t wave_get_read_position(void* theHandler) | ||||
{ | { | ||||
pa_timing_info a_timing_info; | pa_timing_info a_timing_info; | ||||
return 0; | return 0; | ||||
} | } | ||||
//> | |||||
//<wave_test_get_write_buffer | |||||
void *wave_test_get_write_buffer() | void *wave_test_get_write_buffer() | ||||
{ | { | ||||
return NULL; | return NULL; | ||||
#else | #else | ||||
// notdef USE_PULSEAUDIO | |||||
int wave_init(int srate) {return 1;} | int wave_init(int srate) {return 1;} | ||||
#endif // of USE_PULSEAUDIO | #endif // of USE_PULSEAUDIO | ||||
#ifndef USE_PORTAUDIO | #ifndef USE_PORTAUDIO | ||||
//> | |||||
//<clock_gettime2, add_time_in_ms | |||||
void clock_gettime2(struct timespec *ts) | void clock_gettime2(struct timespec *ts) | ||||
{ | { | ||||
#endif // USE_ASYNC | #endif // USE_ASYNC | ||||
//> |
static uint32_t last_play_position=0; | static uint32_t last_play_position=0; | ||||
static uint32_t wave_samplerate; | static uint32_t wave_samplerate; | ||||
//> | |||||
// wave_init | // wave_init | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// sun_audio_fd: modified to hold the file descriptor of the opened | // sun_audio_fd: modified to hold the file descriptor of the opened | ||||
// audio device. | // audio device. | ||||
// | // | ||||
//<wave_init | |||||
int wave_init(int srate) { | int wave_init(int srate) { | ||||
ENTER("wave_init"); | ENTER("wave_init"); | ||||
return(1); | return(1); | ||||
} | } | ||||
//> | |||||
// wave_open | // wave_open | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// sun_audio_fd opened in wave_init, which is passed in as theHandler | // sun_audio_fd opened in wave_init, which is passed in as theHandler | ||||
// parameter in all other methods | // parameter in all other methods | ||||
// | // | ||||
//<wave_open | |||||
void* wave_open(const char* the_api) | void* wave_open(const char* the_api) | ||||
{ | { | ||||
ENTER("wave_open"); | ENTER("wave_open"); | ||||
return((void*) sun_audio_fd); | return((void*) sun_audio_fd); | ||||
} | } | ||||
//> | |||||
// wave_write | // wave_write | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// the number of bytes (not 16-bit samples) sent | // the number of bytes (not 16-bit samples) sent | ||||
// | // | ||||
//<wave_write | |||||
size_t wave_write(void* theHandler, | size_t wave_write(void* theHandler, | ||||
char* theMono16BitsWaveBuffer, | char* theMono16BitsWaveBuffer, | ||||
size_t theSize) | size_t theSize) | ||||
return num; | return num; | ||||
} | } | ||||
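// Illustrative sketch (assumption): on a Sun-style audio device the data would
// ultimately be pushed out with write() on the descriptor opened in wave_init(),
// and the byte count (not the 16-bit sample count) is what gets returned:
//
//   ssize_t num = write((int)theHandler, theMono16BitsWaveBuffer, theSize);
//   // divide num by sizeof(uint16_t) to get the number of 16-bit samples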
//> | |||||
// wave_close | // wave_close | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// The result of the ioctl call (non-0 means failure) | // The result of the ioctl call (non-0 means failure) | ||||
// | // | ||||
//<wave_close | |||||
int wave_close(void* theHandler) | int wave_close(void* theHandler) | ||||
{ | { | ||||
int ret; | int ret; | ||||
return ret; | return ret; | ||||
} | } | ||||
//> | |||||
// wave_is_busy | // wave_is_busy | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// A non-0 value if audio is being played | // A non-0 value if audio is being played | ||||
// | // | ||||
//<wave_is_busy | |||||
int wave_is_busy(void* theHandler) | int wave_is_busy(void* theHandler) | ||||
{ | { | ||||
uint32_t time; | uint32_t time; | ||||
return time != 0; | return time != 0; | ||||
} | } | ||||
//> | |||||
// wave_terminate | // wave_terminate | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// sun_audio_fd: modified - closed and set to -1 | // sun_audio_fd: modified - closed and set to -1 | ||||
// | // | ||||
//<wave_terminate | |||||
void wave_terminate() | void wave_terminate() | ||||
{ | { | ||||
ENTER("wave_terminate"); | ENTER("wave_terminate"); | ||||
SHOW_TIME("wave_terminate > LEAVE"); | SHOW_TIME("wave_terminate > LEAVE"); | ||||
} | } | ||||
//> | |||||
// wave_flush | // wave_flush | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// theHandler: the audio device file descriptor | // theHandler: the audio device file descriptor | ||||
// | // | ||||
//<wave_flush | |||||
void wave_flush(void* theHandler) | void wave_flush(void* theHandler) | ||||
{ | { | ||||
ENTER("wave_flush"); | ENTER("wave_flush"); | ||||
//ioctl((int) theHandler, AUDIO_DRAIN, 0); | |||||
SHOW_TIME("wave_flush > LEAVE"); | SHOW_TIME("wave_flush > LEAVE"); | ||||
} | } | ||||
//> | |||||
// wave_set_callback_is_output_enabled | // wave_set_callback_is_output_enabled | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// cb: the callback to call from wave_write | // cb: the callback to call from wave_write | ||||
// | // | ||||
//<wave_set_callback_is_output_enabled | |||||
void wave_set_callback_is_output_enabled(t_wave_callback* cb) | void wave_set_callback_is_output_enabled(t_wave_callback* cb) | ||||
{ | { | ||||
my_callback_is_output_enabled = cb; | my_callback_is_output_enabled = cb; | ||||
} | } | ||||
//> | |||||
// wave_test_get_write_buffer | // wave_test_get_write_buffer | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// | // | ||||
// NULL | // NULL | ||||
// | // | ||||
//<wave_test_get_write_buffer | |||||
void *wave_test_get_write_buffer() | void *wave_test_get_write_buffer() | ||||
{ | { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
//> | |||||
// wave_get_read_position | // wave_get_read_position | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// The total number of 16-bit samples played by the audio system | // The total number of 16-bit samples played by the audio system | ||||
// so far. | // so far. | ||||
// | // | ||||
//<wave_get_read_position | |||||
uint32_t wave_get_read_position(void* theHandler) | uint32_t wave_get_read_position(void* theHandler) | ||||
{ | { | ||||
audio_info_t ainfo; | audio_info_t ainfo; | ||||
return ainfo.play.samples; | return ainfo.play.samples; | ||||
} | } | ||||
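// Illustrative sketch (assumption): on Solaris-style audio devices the played
// sample count is normally read back with the AUDIO_GETINFO ioctl, e.g.:
//
//   audio_info_t ainfo;
//   ioctl((int)theHandler, AUDIO_GETINFO, &ainfo);
//   return ainfo.play.samples;   // 16-bit samples played so far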
//> | |||||
// wave_get_write_position | // wave_get_write_position | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// the index wraps back to 0. We don't handle that wrapping, so | // the index wraps back to 0. We don't handle that wrapping, so | ||||
// the behavior after 54 hours of play time is undefined.]]] | // the behavior after 54 hours of play time is undefined.]]] | ||||
// | // | ||||
//<wave_get_write_position | |||||
uint32_t wave_get_write_position(void* theHandler) | uint32_t wave_get_write_position(void* theHandler) | ||||
{ | { | ||||
ENTER("wave_get_write_position"); | ENTER("wave_get_write_position"); | ||||
return total_samples_sent; | return total_samples_sent; | ||||
} | } | ||||
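// The "54 hours" mentioned above follows from the counter width: a uint32_t
// sample index wraps after 2^32 samples, and at the usual 22050 samples/s
// 4294967296 / 22050 ~= 194783 s ~= 54.1 hours of continuous playback.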
//> | |||||
// wave_get_remaining_time | // wave_get_remaining_time | ||||
// | // | ||||
// DESCRIPTION: | // DESCRIPTION: | ||||
// Time in milliseconds before the sample is played or 0 if the sample | // Time in milliseconds before the sample is played or 0 if the sample | ||||
// is currently playing or has already been played. | // is currently playing or has already been played. | ||||
// | // | ||||
//<wave_get_remaining_time | |||||
int wave_get_remaining_time(uint32_t sample, uint32_t* time) | int wave_get_remaining_time(uint32_t sample, uint32_t* time) | ||||
{ | { | ||||
uint32_t a_time=0; | uint32_t a_time=0; | ||||
} | } | ||||
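// Illustrative sketch (assumption): per the description above, the remaining time
// for a sample index that has not yet been played can be derived from the current
// play position and the sample rate (names here are hypothetical):
static int remaining_time_sketch(uint32_t sample, uint32_t played, uint32_t srate, uint32_t *time)
{
	if(time == NULL)
		return -1;
	if(sample <= played)
		*time = 0;    // currently playing or already played
	else
		*time = (uint32_t)(((uint64_t)(sample - played) * 1000) / srate);
	return 0;
}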
#else | #else | ||||
// notdef USE_SADA | |||||
int wave_init(int srate) {return 1;} | int wave_init(int srate) {return 1;} | ||||
void* wave_open(const char* the_api) {return (void *)1;} | void* wave_open(const char* the_api) {return (void *)1;} | ||||
#endif // of USE_PORTAUDIO | #endif // of USE_PORTAUDIO | ||||
//> | |||||
//<clock_gettime2, add_time_in_ms | |||||
void clock_gettime2(struct timespec *ts) | void clock_gettime2(struct timespec *ts) | ||||
{ | { | ||||
struct timeval tv; | struct timeval tv; | ||||
} | } | ||||
#endif // USE_ASYNC | #endif // USE_ASYNC | ||||
//> |
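// Illustrative sketch (assumption): clock_gettime2() appears to emulate
// clock_gettime(CLOCK_REALTIME, ...) using gettimeofday(); the conversion from
// microseconds to nanoseconds is the only real work:
static void clock_gettime2_sketch(struct timespec *ts)
{
	struct timeval tv;
	if(ts == NULL)
		return;
	gettimeofday(&tv, NULL);
	ts->tv_sec = tv.tv_sec;
	ts->tv_nsec = tv.tv_usec * 1000;   // 1 us = 1000 ns
}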
// restrict highest harmonic to half the samplerate | // restrict highest harmonic to half the samplerate | ||||
hmax_samplerate = (((samplerate * 19)/40) << 16)/pitch; // only 95% of Nyquist freq | hmax_samplerate = (((samplerate * 19)/40) << 16)/pitch; // only 95% of Nyquist freq | ||||
// hmax_samplerate = (samplerate << 16)/(pitch*2); | |||||
if(hmax > hmax_samplerate) | if(hmax > hmax_samplerate) | ||||
hmax = hmax_samplerate; | hmax = hmax_samplerate; | ||||
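// Why 19/40: harmonics sit at integer multiples of the pitch, so the highest
// usable harmonic number is about 0.95 * (samplerate/2) / pitch, and
// 0.95 / 2 = 19/40.  The <<16 apparently matches the fixed-point format in
// which pitch is passed here, so the division yields a plain harmonic count.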
rp->x2 = 0; | rp->x2 = 0; | ||||
} | } | ||||
// x = exp(-pi * bwidth * t) | |||||
arg = minus_pi_t * bwidth; | arg = minus_pi_t * bwidth; | ||||
x = exp(arg); | x = exp(arg); | ||||
// c = -(x*x) | |||||
rp->c = -(x * x); | rp->c = -(x * x); | ||||
// b = x * 2*cos(2 pi * freq * t) | |||||
arg = two_pi_t * freq; | arg = two_pi_t * freq; | ||||
rp->b = x * cos(arg) * 2.0; | rp->b = x * cos(arg) * 2.0; | ||||
// a = 1.0 - b - c | |||||
rp->a = 1.0 - rp->b - rp->c; | rp->a = 1.0 - rp->b - rp->c; | ||||
} // end if setresonator | } // end if setresonator | ||||
#endif | #endif | ||||
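// Illustrative sketch, not from the original source: the coefficients computed
// above (c = -x*x, b = 2*x*cos(2*pi*F*t), a = 1-b-c) are those of a classic
// two-pole (Klatt-style) resonator.  Applying it looks like this, with x1/x2
// holding the two previous outputs:
typedef struct { double a, b, c, x1, x2; } resonator_sketch_t;

static double resonate_sketch(resonator_sketch_t *rp, double input)
{
	double out = rp->a * input + rp->b * rp->x1 + rp->c * rp->x2;
	rp->x2 = rp->x1;   // shift the output history
	rp->x1 = out;
	return out;
}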
maxh2 = PeaksToHarmspect(peaks, wdata.pitch<<4, hspect[0], 0); | maxh2 = PeaksToHarmspect(peaks, wdata.pitch<<4, hspect[0], 0); | ||||
// adjust amplitude to compensate for fewer harmonics at higher pitch | // adjust amplitude to compensate for fewer harmonics at higher pitch | ||||
// amplitude2 = (wdata.amplitude * wdata.pitch)/(100 << 11); | |||||
amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | ||||
// switch sign of harmonics above about 900Hz, to reduce max peak amplitude | // switch sign of harmonics above about 900Hz, to reduce max peak amplitude | ||||
} | } | ||||
// adjust amplitude to compensate for fewer harmonics at higher pitch | // adjust amplitude to compensate for fewer harmonics at higher pitch | ||||
// amplitude2 = (wdata.amplitude * wdata.pitch)/(100 << 11); | |||||
amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | amplitude2 = (wdata.amplitude * (wdata.pitch >> 8) * wdata.amplitude_fmt)/(10000 << 3); | ||||
if(glottal_flag > 0) | if(glottal_flag > 0) | ||||
if((ix = amp_ix>>8) > 127) ix = 127; | if((ix = amp_ix>>8) > 127) ix = 127; | ||||
amp = amplitude_env[ix]; | amp = amplitude_env[ix]; | ||||
amplitude2 = (amplitude2 * amp)/128; | amplitude2 = (amplitude2 * amp)/128; | ||||
// if(amp < 255) | |||||
// modulation_type = 7; | |||||
} | } | ||||
// introduce roughness into the sound by reducing the amplitude of | // introduce roughness into the sound by reducing the amplitude of | ||||
WavegenSetEcho(); | WavegenSetEcho(); | ||||
SetPitchFormants(); | SetPitchFormants(); | ||||
MarkerEvent(espeakEVENT_SAMPLERATE, 0, wvoice->samplerate, 0, out_ptr); | MarkerEvent(espeakEVENT_SAMPLERATE, 0, wvoice->samplerate, 0, out_ptr); | ||||
// WVoiceChanged(wvoice); | |||||
} | } | ||||
int qix; | int qix; | ||||
int cmd; | int cmd; | ||||
static int glottal_reduce_tab1[4] = {0x30, 0x30, 0x40, 0x50}; // vowel before [?], amp * 1/256 | static int glottal_reduce_tab1[4] = {0x30, 0x30, 0x40, 0x50}; // vowel before [?], amp * 1/256 | ||||
// static int glottal_reduce_tab1[4] = {0x30, 0x40, 0x50, 0x60}; // vowel before [?], amp * 1/256 | |||||
static int glottal_reduce_tab2[4] = {0x90, 0xa0, 0xb0, 0xc0}; // vowel after [?], amp * 1/256 | static int glottal_reduce_tab2[4] = {0x90, 0xa0, 0xb0, 0xc0}; // vowel after [?], amp * 1/256 | ||||
#ifdef LOG_FRAMES | #ifdef LOG_FRAMES |