Browse Source

tokenizer.c: Support general symbol tokens.

master
Reece H. Dunn 8 years ago
parent
commit
dd90d3812d
3 changed files with 62 additions and 0 deletions
  1. 1
    0
      src/include/espeak-ng/tokenizer.h
  2. 9
    0
      src/libespeak-ng/tokenizer.c
  3. 52
    0
      tests/tokenizer.c

+ 1
- 0
src/include/espeak-ng/tokenizer.h View File

ESPEAKNG_TOKEN_SEMICOLON,
ESPEAKNG_TOKEN_ELLIPSIS,
ESPEAKNG_TOKEN_PUNCTUATION,
ESPEAKNG_TOKEN_SYMBOL,
} espeak_ng_TOKEN_TYPE;


ESPEAK_NG_API espeak_ng_TOKEN_TYPE

+ 9
- 0
src/libespeak-ng/tokenizer.c View File

ESPEAKNG_CTYPE_SEMICOLON,
ESPEAKNG_CTYPE_ELLIPSIS,
ESPEAKNG_CTYPE_PUNCTUATION,
ESPEAKNG_CTYPE_SYMBOL,
} espeakng_CTYPE;


#define ESPEAKNG_CTYPE_PROPERTY_MASK 0xFE0000000000C001ull
case UCD_CATEGORY_Pi: return ESPEAKNG_CTYPE_PUNCTUATION;
case UCD_CATEGORY_Po: return ESPEAKNG_CTYPE_PUNCTUATION;
case UCD_CATEGORY_Ps: return ESPEAKNG_CTYPE_PUNCTUATION;
case UCD_CATEGORY_Sc: return ESPEAKNG_CTYPE_SYMBOL;
case UCD_CATEGORY_Sk: return ESPEAKNG_CTYPE_SYMBOL;
case UCD_CATEGORY_Sm: return ESPEAKNG_CTYPE_SYMBOL;
case UCD_CATEGORY_So: return ESPEAKNG_CTYPE_SYMBOL;
}


// 5. Classify the remaining codepoints.
current += utf8_out(c, current);
*current = '\0';
return ESPEAKNG_TOKEN_PUNCTUATION;
case ESPEAKNG_CTYPE_SYMBOL:
current += utf8_out(c, current);
*current = '\0';
return ESPEAKNG_TOKEN_SYMBOL;
default:
current += utf8_out(c, current);
*current = '\0';

+ 52
- 0
tests/tokenizer.c View File

destroy_tokenizer(tokenizer); destroy_tokenizer(tokenizer);
} }


void
test_Latn_symbol_tokens()
{
printf("testing Latin (Latn) script symbol tokens\n");

espeak_ng_TOKENIZER *tokenizer = create_tokenizer();
espeak_ng_TEXT_DECODER *decoder = create_text_decoder();

assert(text_decoder_decode_string(decoder, "$ ^ + \xC2\xA9", -1, ESPEAKNG_ENCODING_UTF_8) == ENS_OK);
assert(tokenizer_reset(tokenizer, decoder, ESPEAKNG_TOKENIZER_OPTION_TEXT) == 1);

// General Category: Sc
assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_SYMBOL);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), "$") == 0);

assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_WHITESPACE);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), " ") == 0);

// General Category: Sk
assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_SYMBOL);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), "^") == 0);

assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_WHITESPACE);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), " ") == 0);

// General Category: Sm
assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_SYMBOL);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), "+") == 0);

assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_WHITESPACE);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), " ") == 0);

// General Category: So, COPYRIGHT SIGN [U+00A9]
assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_SYMBOL);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(strcmp(tokenizer_get_token_text(tokenizer), "\xC2\xA9") == 0);

assert(tokenizer_read_next_token(tokenizer) == ESPEAKNG_TOKEN_END_OF_BUFFER);
assert(tokenizer_get_token_text(tokenizer) != NULL);
assert(*tokenizer_get_token_text(tokenizer) == '\0');

destroy_text_decoder(decoder);
destroy_tokenizer(tokenizer);
}

// Run every tokenizer test case in sequence; each case asserts internally.
void
run_tests()
{
	test_Latn_word_tokens();
	test_Latn_punctuation_tokens();
	test_Latn_general_punctuation_tokens();
	test_Latn_symbol_tokens();

	printf("done\n");
}

Loading…
Cancel
Save