{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "collapsed_sections": [ "VtxEYym69RUH", "XjAPkfq7SF87" ] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "WEY5MiKLzurH" }, "source": [ "# Setup Environment" ] }, { "cell_type": "code", "source": [ "! pip install epitran==1.26.0" ], "metadata": { "id": "jviCS0zCmtJc", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "e8d100ba-e606-4956-ee15-81ccc6557ba6" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting epitran==1.26.0\n", " Downloading epitran-1.26.0-py2.py3-none-any.whl.metadata (34 kB)\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.11/dist-packages (from epitran==1.26.0) (75.2.0)\n", "Requirement already satisfied: regex in /usr/local/lib/python3.11/dist-packages (from epitran==1.26.0) (2024.11.6)\n", "Collecting panphon>=0.20 (from epitran==1.26.0)\n", " Downloading panphon-0.21.2-py2.py3-none-any.whl.metadata (15 kB)\n", "Requirement already satisfied: marisa-trie in /usr/local/lib/python3.11/dist-packages (from epitran==1.26.0) (1.2.1)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from epitran==1.26.0) (2.32.3)\n", "Collecting jamo (from epitran==1.26.0)\n", " Downloading jamo-0.4.1-py3-none-any.whl.metadata (2.3 kB)\n", "Collecting unicodecsv (from panphon>=0.20->epitran==1.26.0)\n", " Downloading unicodecsv-0.14.1.tar.gz (10 kB)\n", " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "Requirement already satisfied: PyYAML in /usr/local/lib/python3.11/dist-packages (from panphon>=0.20->epitran==1.26.0) (6.0.2)\n", "Requirement already satisfied: numpy>=1.20.2 in /usr/local/lib/python3.11/dist-packages (from panphon>=0.20->epitran==1.26.0) (2.0.2)\n", "Requirement already satisfied: editdistance in /usr/local/lib/python3.11/dist-packages (from panphon>=0.20->epitran==1.26.0) (0.8.1)\n", "Collecting munkres (from panphon>=0.20->epitran==1.26.0)\n", " Downloading munkres-1.1.4-py2.py3-none-any.whl.metadata (980 bytes)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->epitran==1.26.0) (3.4.1)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests->epitran==1.26.0) (3.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->epitran==1.26.0) (2.4.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests->epitran==1.26.0) (2025.4.26)\n", "Downloading epitran-1.26.0-py2.py3-none-any.whl (188 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m188.5/188.5 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading panphon-0.21.2-py2.py3-none-any.whl (75 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.4/75.4 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading jamo-0.4.1-py3-none-any.whl (9.5 kB)\n", "Downloading munkres-1.1.4-py2.py3-none-any.whl (7.0 kB)\n", "Building wheels for collected packages: unicodecsv\n", " Building wheel for unicodecsv (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", " Created wheel for unicodecsv: filename=unicodecsv-0.14.1-py3-none-any.whl size=10744 sha256=9d5442e17e65cdf34cadb6d4681337702fde69e9bea33a290ccb2bc88151e8b5\n", " Stored in directory: /root/.cache/pip/wheels/ec/03/6f/d2e0162d94c0d451556fa43dd4d5531457245c34a36b41ef4a\n", "Successfully built unicodecsv\n", "Installing collected packages: unicodecsv, munkres, jamo, panphon, epitran\n", "Successfully installed epitran-1.26.0 jamo-0.4.1 munkres-1.1.4 panphon-0.21.2 unicodecsv-0.14.1\n" ] } ] }, { "cell_type": "code", "source": [ "! pip install g2pk==0.9.4" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "vxh7pA-mwSDV", "outputId": "f03e0881-3acb-4ab1-fbbb-016a0c4069f3" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting g2pk==0.9.4\n", " Downloading g2pK-0.9.4-py3-none-any.whl.metadata (7.5 kB)\n", "Requirement already satisfied: jamo in /usr/local/lib/python3.11/dist-packages (from g2pk==0.9.4) (0.4.1)\n", "Requirement already satisfied: nltk in /usr/local/lib/python3.11/dist-packages (from g2pk==0.9.4) (3.9.1)\n", "Collecting konlpy (from g2pk==0.9.4)\n", " Downloading konlpy-0.6.0-py2.py3-none-any.whl.metadata (1.9 kB)\n", "Collecting python-mecab-ko (from g2pk==0.9.4)\n", " Downloading python_mecab_ko-1.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.4 kB)\n", "Collecting JPype1>=0.7.0 (from konlpy->g2pk==0.9.4)\n", " Downloading jpype1-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.9 kB)\n", "Requirement already satisfied: lxml>=4.1.0 in /usr/local/lib/python3.11/dist-packages (from konlpy->g2pk==0.9.4) (5.4.0)\n", "Requirement already satisfied: numpy>=1.6 in /usr/local/lib/python3.11/dist-packages (from konlpy->g2pk==0.9.4) (2.0.2)\n", "Requirement already satisfied: click in /usr/local/lib/python3.11/dist-packages (from nltk->g2pk==0.9.4) (8.1.8)\n", "Requirement already satisfied: joblib in /usr/local/lib/python3.11/dist-packages (from nltk->g2pk==0.9.4) (1.4.2)\n", "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.11/dist-packages (from nltk->g2pk==0.9.4) (2024.11.6)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from nltk->g2pk==0.9.4) (4.67.1)\n", "Collecting python-mecab-ko-dic (from python-mecab-ko->g2pk==0.9.4)\n", " Downloading python_mecab_ko_dic-2.1.1.post2-py3-none-any.whl.metadata (1.4 kB)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from JPype1>=0.7.0->konlpy->g2pk==0.9.4) (24.2)\n", "Downloading g2pK-0.9.4-py3-none-any.whl (27 kB)\n", "Downloading konlpy-0.6.0-py2.py3-none-any.whl (19.4 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m19.4/19.4 MB\u001b[0m \u001b[31m61.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading python_mecab_ko-1.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (580 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m580.9/580.9 kB\u001b[0m \u001b[31m34.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading jpype1-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (494 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m494.1/494.1 kB\u001b[0m \u001b[31m33.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading python_mecab_ko_dic-2.1.1.post2-py3-none-any.whl (34.5 MB)\n", 
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m34.5/34.5 MB\u001b[0m \u001b[31m18.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: python-mecab-ko-dic, python-mecab-ko, JPype1, konlpy, g2pk\n", "Successfully installed JPype1-1.5.2 g2pk-0.9.4 konlpy-0.6.0 python-mecab-ko-1.3.7 python-mecab-ko-dic-2.1.1.post2\n" ] } ] }, { "cell_type": "code", "source": [ "! pip install jiwer" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "stR7NfnfZqB1", "outputId": "c5e09b12-0552-4e2d-fd8f-387c8308d1c4" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting jiwer\n", " Downloading jiwer-3.1.0-py3-none-any.whl.metadata (2.6 kB)\n", "Requirement already satisfied: click>=8.1.8 in /usr/local/lib/python3.11/dist-packages (from jiwer) (8.1.8)\n", "Collecting rapidfuzz>=3.9.7 (from jiwer)\n", " Downloading rapidfuzz-3.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (12 kB)\n", "Downloading jiwer-3.1.0-py3-none-any.whl (22 kB)\n", "Downloading rapidfuzz-3.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m43.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: rapidfuzz, jiwer\n", "Successfully installed jiwer-3.1.0 rapidfuzz-3.13.0\n" ] } ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "PfthI4eOqBri" }, "outputs": [], "source": [ "import os\n", "import re\n", "import csv\n", "import pandas as pd\n", "import json\n", "import itertools\n", "from tqdm import tqdm\n", "from jiwer import cer" ] }, { "cell_type": "markdown", "source": [ "# mapping" ], "metadata": { "id": "VtxEYym69RUH" } }, { "cell_type": "code", "source": [ "output_to_phonetics_map = {\n", " 'м': 'm',\n", " 'ʷ':' v',\n", " 'w': 'v',\n", " 'c': 'k',\n", " 'ĉ': 'C',\n", " 'č': 'C',\n", " '̕': \"?\",\n", " \"'\": '?',\n", " 'ʔ': \"?\",\n", " 'ꞌ': \"?\",\n", " '̛': \"?\",\n", " '’': \"?\",\n", " 'ʼ': \"?\",\n", " \"'\": '?',\n", " 'â': 'A',\n", " 'â': 'A',\n", " 'ȃ': 'A',\n", " 'ž': 'Z',\n", " 'š': 'S',\n", " 'W': 'v',\n", " 'β': 'f',\n", " 'е': 'e',\n", " '`': \"?\",\n", " 'ɑ': 'A',\n", " 'ɑ': 'A',\n", " 'ʃ': 'S',\n", " 'ð': 'z',\n", " 'ɾ': 'r',\n", " 'æ': 'a',\n", " 'ɪ': 'e',\n", " 'χ': 'x',\n", " 'ɣ': 'q',\n", " 'ʒ': 'Z',\n", " ':': '',\n", " 'ː': '',\n", " 'ā': 'A',\n", " 'ː': '',\n", " 'ä': 'A',\n", " 'á': 'A',\n", " 'š': 'S',\n", " 'ū': 'u',\n", " 'û': 'u',\n", " 'ś': 's',\n", " 'ī': 'i',\n", " 'í': 'i',\n", " 'î': 'i',\n", " 'é': 'e',\n", " 'ḥ': 'h',\n", " 'ɒ': 'A',\n", " 'ʰ': '',\n", " 'ə': 'e',\n", " 'R': 'r',\n", " 'W': 'v',\n", " 'Q': 'q',\n", " 'T': 't',\n", " 'Y': 'y',\n", " 'P': 'p',\n", " 'D': 'd',\n", " 'F': 'f',\n", " 'H': 'h',\n", " 'J': 'j',\n", " 'L': 'l',\n", " 'X': 'x',\n", " 'V': 'v',\n", " 'B': 'b',\n", " 'N': 'n',\n", " 'M': 'm',\n", " 'K': 'k',\n", " 'G': 'g',\n", " 'U': 'u',\n", " 'O': 'o',\n", " 'I': 'i',\n", " 'E': 'e',\n", " 'ŋ': 'ng',\n", " '.': '',\n", " 'ɛ': 'e',\n", " 'ʊ': 'u',\n", " \"ˈ\": '?',\n", " 'ù': 'u',\n", " 'θ': 's',\n", " '̪': '',\n", " 'ũ': 'u',\n", " '_': '',\n", " 'ç': 'C',\n", " 'ĝ': 'q',\n", " 'ɢ': 'q',\n", " 'ː': '',\n", " 'í': 'i',\n", " 'ŝ': 'S',\n", " '!': '',\n", " 'ǧ': 'q',\n", " 'ʻ': '?',\n", " 'è': 'e',\n", " '�': '',\n", " 'ú': 'u',\n", " 'ô': 'o',\n", " 'ē': 'e',\n", " 'à': 'A',\n", " 'ă': 'A',\n", " 'ǐ': 'i',\n", 
" 'ü': 'u',\n", " '\\u200e': '',\n", " 'ğ': 'q',\n", " 'ṣ': 'S',\n", " 'â': 'A',\n", " 'â': 'A',\n", " 'ȃ': 'A',\n", " 'ž': 'Z',\n", " 'š': 'S',\n", " 'ā': 'A',\n", " 'ː': '',\n", " 'ä': 'A',\n", " 'á': 'A',\n", " 'š': 'S',\n", " 'ū': 'u',\n", " 'û': 'u',\n", " 'ś': 'S',\n", " 'ī': 'i',\n", " 'í': 'i',\n", " 'î': 'i',\n", " 'é': 'e',\n", "}\n", "\n", "consonants_regex = '(?=' + '|'.join(['q', 'r', 't', 'y', 'p', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'z', 'x', 'c', 'v', 'b', 'n', 'm', 'Q', 'R', 'T', 'Y', 'P', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'Z', 'X', 'C', 'V', 'B', 'N', 'M' ]) + ')'\n", "vowels_regex = '(?=' + '|'.join(['a', 'A', 'e', 'i', 'u', 'o']) + ')'\n", "\n", "\n", "def replace_phonetic_characters(input_string, char_map=output_to_phonetics_map, from_phonetics=False):\n", " substituted = re.sub(r'tʃʰ', 'C', input_string)\n", " substituted = re.sub(r't͡ʃ', 'C', input_string)\n", " substituted = re.sub(r'tʃ', 'C', substituted)\n", " substituted = re.sub(r't͡S', 'C', substituted)\n", " substituted = re.sub(r'ow', 'o', substituted)\n", " substituted = re.sub('d͡ʒ', 'j', substituted)\n", " substituted = re.sub('dʒ', 'j', substituted)\n", "\n", " # Create a translation table using str.maketrans\n", " translation_table = str.maketrans(char_map)\n", "\n", " # Use str.translate to replace characters based on the translation table\n", " translated = substituted.translate(translation_table)\n", "\n", " return translated" ], "metadata": { "id": "TKx8oA1n7rKh" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "XjAPkfq7SF87" }, "source": [ "# Get Evaluation Data" ] }, { "cell_type": "code", "source": [ "!wget https://huggingface.co/datasets/MahtaFetrat/SentenceBench/raw/main/SentenceBench.csv" ], "metadata": { "id": "qwCG0jX-88nQ", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "bda9ccb4-f4d8-432b-f460-bfcbea7e462b" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "--2025-05-10 11:19:00-- https://huggingface.co/datasets/MahtaFetrat/SentenceBench/raw/main/SentenceBench.csv\n", "Resolving huggingface.co (huggingface.co)... 18.164.174.17, 18.164.174.55, 18.164.174.118, ...\n", "Connecting to huggingface.co (huggingface.co)|18.164.174.17|:443... connected.\n", "HTTP request sent, awaiting response... 
200 OK\n", "Length: 56026 (55K) [text/plain]\n", "Saving to: ‘SentenceBench.csv’\n", "\n", "\rSentenceBench.csv 0%[ ] 0 --.-KB/s \rSentenceBench.csv 100%[===================>] 54.71K --.-KB/s in 0.008s \n", "\n", "2025-05-10 11:19:00 (6.90 MB/s) - ‘SentenceBench.csv’ saved [56026/56026]\n", "\n" ] } ] }, { "cell_type": "code", "source": [ "sentence_bench = pd.read_csv('SentenceBench.csv')" ], "metadata": { "id": "hJO-UAPDQvcb" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "sentence_bench.head(3)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 143 }, "id": "qlYbrnUa9LAN", "outputId": "2fa1904b-72eb-4df9-9d92-f3918ce8ccf3" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " dataset grapheme \\\n", "0 homograph من قدر تو را می‌دانم \n", "1 homograph از قضای الهی به قدر الهی پناه می‌برم \n", "2 homograph به دست و صورتم کرم زدم \n", "\n", " phoneme homograph word \\\n", "0 man qadr-e to rA mi-dAnam قدر \n", "1 ?az qazAy ?elAhi be qadar-e ?elAhi panAh mi-baram قدر \n", "2 be dast-o suratam kerem zadam کرم \n", "\n", " pronunciation \n", "0 qadr \n", "1 qadar \n", "2 kerem " ], "text/html": [ "\n", "
\n", "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
datasetgraphemephonemehomograph wordpronunciation
0homographمن قدر تو را می‌دانمman qadr-e to rA mi-dAnamقدرqadr
1homographاز قضای الهی به قدر الهی پناه می‌برم?az qazAy ?elAhi be qadar-e ?elAhi panAh mi-baramقدرqadar
2homographبه دست و صورتم کرم زدمbe dast-o suratam kerem zadamکرمkerem
\n", "
\n", "
\n", "\n", "
\n", " \n", "\n", " \n", "\n", " \n", "
\n", "\n", "\n", "
\n", " \n", "\n", "\n", "\n", " \n", "
\n", "
\n", "
\n" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "dataframe", "variable_name": "sentence_bench", "summary": "{\n \"name\": \"sentence_bench\",\n \"rows\": 400,\n \"fields\": [\n {\n \"column\": \"dataset\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 3,\n \"samples\": [\n \"homograph\",\n \"mana-tts\",\n \"commonvoice\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"grapheme\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 400,\n \"samples\": [\n \"\\u0622\\u06cc\\u0627 \\u0628\\u0627\\u06cc\\u062f \\u062d\\u0642\\u06cc\\u0642\\u062a \\u0631\\u0627 \\u0628\\u0647 \\u0622\\u0646\\u200c\\u0647\\u0627 \\u0628\\u06af\\u0648\\u06cc\\u06cc\\u0645\\u061f\",\n \"\\u06a9\\u0647 \\u067e\\u06cc\\u0634 \\u0627\\u0632 \\u0627\\u0646\\u0642\\u0644\\u0627\\u0628 \\u0628\\u0647 \\u062e\\u0648\\u0627\\u0628\\u06af\\u0627\\u0647 \\u062f\\u062e\\u062a\\u0631\\u0627\\u0646 \\u0648 \\u0632\\u0646\\u0627\\u0646 \\u0646\\u0627\\u0628\\u06cc\\u0646\\u0627 \\u0627\\u062e\\u062a\\u0635\\u0627\\u0635\\u200c\\u06cc\\u0627\\u0641\\u062a\\u0647 \\u0628\\u0648\\u062f. \\u0627\\u063a\\u0644\\u0628 \\u0632\\u0646\\u0627\\u0646\\u06cc \\u06a9\\u0647 \\u062f\\u0631 \\u0627\\u06cc\\u0646 \\u062e\\u0648\\u0627\\u0628\\u06af\\u0627\\u0647 \\u0632\\u0646\\u062f\\u06af\\u06cc \\u0645\\u06cc\\u200c\\u06a9\\u0631\\u062f\\u0646\\u062f\\u060c \",\n \"\\u062f\\u0648\\u062f \\u0648 \\u0645\\u0647 \\u063a\\u0644\\u06cc\\u0638\\u06cc \\u062f\\u0631 \\u0645\\u062d\\u06cc\\u0637 \\u067e\\u06cc\\u0686\\u06cc\\u062f\\u0647 \\u0628\\u0648\\u062f\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"phoneme\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 400,\n \"samples\": [\n \"?AyA bAyad haqiqat rA be ?AnhA beguyim\\u061f\",\n \"ke piS ?az ?enqelAb be xAbgAh-e doxtarAn va zanAn-e nAbinA ?extesAsyAfte bud ?aqlab-e zanAni ke dar ?in xAbgAh zendegi mikardand\",\n \"dud-o meh-e qalizi dar mohit piCide bud\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"homograph word\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 101,\n \"samples\": [\n \"\\u06af\\u0631\\u06cc\\u0645\",\n \"\\u0633\\u0628\\u06a9\\u06cc\",\n \"\\u06a9\\u0645\\u06cc\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"pronunciation\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 210,\n \"samples\": [\n \"darham\",\n \"Sum\",\n \"moSk\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}" } }, "metadata": {}, "execution_count": 8 } ] }, { "cell_type": "markdown", "metadata": { "id": "wDV7ysXf2b_H" }, "source": [ "### Get ManaTTS" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "TcL5ZLvSSnVB", "outputId": "59e9cd68-4665-4b68-bc35-9d80d2cc03d9" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "[('در این نوشته بنا داریم با یک ابزار ساده و مکانیکی افزایش بینایی برای افراد کم\\u200cبینا ',\n", " 'dar ?in neveSte banA dArim bA yek ?abzAr-e sAde va mekAniki-ye ?afzAyeS-e binAyi barAye ?afrAd-e kam\\u200cbinA '),\n", " ('به نام بی\\u200cوپتیک یا عدسی دورنما آشنا شویم. ',\n", " 'be nAm-e biyoptik yA ?adasi-ye durnamA ?ASnA Savim'),\n", " ('دراین\\u200cصورت، انجام خودارزیابی و ارائه بازخورد بر عهده خودتان است. 
',\n", " 'dar ?in surat ?anjAm-e xod?arzyAbi va ?erA?e-ye bAzxord bar ?ohde-ye xodetAn ?ast ')]" ] }, "metadata": {}, "execution_count": 9 } ], "source": [ "filtered_rows = sentence_bench[sentence_bench['dataset'] == 'mana-tts'][['grapheme', 'phoneme']]\n", "\n", "# Convert to a list of tuples\n", "mana_evaluation_data = list(filtered_rows.itertuples(index=False, name=None))\n", "\n", "mana_evaluation_data[:3]" ] }, { "cell_type": "markdown", "metadata": { "id": "Jjacw9Mp2eoX" }, "source": [ "### Get CommonVoice" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "-yQnqCGw26sk", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "253e406c-5fb7-4b8f-fc2e-25a289e5bb0d" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "[('در اکثر شهرها، مرکزی برای خرید دوچرخه وجود دارد.',\n", " 'dar ?aksar-e Sahr-hA, markazi barAye xarid-e doCarxe vojud dArad.'),\n", " ('پس از مدرسه کودکان به سوی خانه جست و خیز کردند.',\n", " 'pas ?az madrese kudakAn be suye xAne jast-o-xiz kardand.'),\n", " ('شما نگران زن و بچه این نباش.', 'SomA negarAn-e zan-o-baCCe-ye ?in nabAS.')]" ] }, "metadata": {}, "execution_count": 10 } ], "source": [ "filtered_rows = sentence_bench[sentence_bench['dataset'] == 'commonvoice'][['grapheme', 'phoneme']]\n", "\n", "# Convert to a list of tuples\n", "commonvoice_evaluation_data = list(filtered_rows.itertuples(index=False, name=None))\n", "\n", "commonvoice_evaluation_data[:3]" ] }, { "cell_type": "markdown", "metadata": { "id": "ciSPyhRc3Rvo" }, "source": [ "### Get Homograph" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "XlFc5JbN3Rvz", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "7d6b2c71-afe5-4e1b-dc9d-16c0581e3222" }, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "[('من قدر تو را می\\u200cدانم', 'man qadr-e to rA mi-dAnam', 'قدر', 'qadr'),\n", " ('از قضای الهی به قدر الهی پناه می\\u200cبرم',\n", " '?az qazAy ?elAhi be qadar-e ?elAhi panAh mi-baram',\n", " 'قدر',\n", " 'qadar'),\n", " ('به دست و صورتم کرم زدم', 'be dast-o suratam kerem zadam', 'کرم', 'kerem')]" ] }, "metadata": {}, "execution_count": 11 } ], "source": [ "filtered_rows = sentence_bench[sentence_bench['dataset'] == 'homograph'][['grapheme', 'phoneme', 'homograph word',\t'pronunciation']]\n", "\n", "# Convert to a list of tuples\n", "homograph_evaluation_data = list(filtered_rows.itertuples(index=False, name=None))\n", "\n", "homograph_evaluation_data[:3]" ] }, { "cell_type": "markdown", "metadata": { "id": "R6PE5ds45TPr" }, "source": [ "# Evaluate Method Outputs" ] }, { "cell_type": "markdown", "metadata": { "id": "CLKaERek4u_D" }, "source": [ "## PER Evaluation" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "nBee9xG54u_E" }, "outputs": [], "source": [ "def remove_non_word_chars(text):\n", " pattern = r'[^\\w\\s\\?]'\n", " cleaned_text = re.sub(pattern, ' ', text)\n", " return cleaned_text" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "W8PoNV9V4u_E" }, "outputs": [], "source": [ "def remove_white_spaces(text):\n", " cleaned_text = re.sub(r'\\s+', ' ', text)\n", " return cleaned_text.strip()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "YD0cvnn74u_E" }, "outputs": [], "source": [ "def get_word_only_text(text):\n", " word_only_text = remove_non_word_chars(text)\n", " extra_space_removed_text = remove_white_spaces(word_only_text)\n", "\n", " return extra_space_removed_text" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": { "id": "6OQQDual4u_E" }, "outputs": [], "source": [ "def get_texts_cer(reference, model_output):\n", " # Preprocess input texts to only contain word characters\n", " word_only_reference = get_word_only_text(reference)\n", " word_only_output = get_word_only_text(model_output)\n", "\n", " # Return +infinity for CER if any of the texts is empty\n", " if not word_only_reference.strip() or not word_only_output.strip():\n", " return float('inf')\n", "\n", " return cer(word_only_reference, word_only_output)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ncWQnPdW4u_E" }, "outputs": [], "source": [ "def get_avg_cer_of_method(method_outputs, references):\n", " cers = []\n", " for idx, o in enumerate(method_outputs):\n", " cer = get_texts_cer(o.replace('-', ''), references[idx][1].replace('-', ''))\n", " if cer != float('inf'):\n", " cers.append(cer)\n", "\n", " return sum(cers) / len(cers)" ] }, { "cell_type": "markdown", "source": [ "## Homograph Evaluation" ], "metadata": { "id": "oBgNtpFQDwku" } }, { "cell_type": "code", "source": [ "def get_homograph_performance(outputs, references):\n", " corrects = 0\n", " total = 0\n", "\n", " for idx, (g, p, homograph, right) in enumerate(references):\n", " if homograph != '':\n", " total += 1\n", " if right in outputs[idx]:\n", " corrects += 1\n", "\n", " return corrects / total" ], "metadata": { "id": "J445ULEvEEDn" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# Full bench" ], "metadata": { "id": "JGEUIrbi9kNH" } }, { "cell_type": "code", "source": [ "benchmark = []\n", "\n", "for g, p in mana_evaluation_data:\n", " benchmark.append((g, p, '', ''))\n", "\n", "for g, p in commonvoice_evaluation_data:\n", " benchmark.append((g, p, '', ''))\n", "\n", "for g, p, w, r in homograph_evaluation_data:\n", " benchmark.append((g, p, w, r))\n", "\n", "benchmark = benchmark[:400]" ], "metadata": { "id": "fGzQvL8V9mln" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "def print_all_metrics(predictions):\n", " per = get_avg_cer_of_method(predictions, benchmark) * 100\n", " homograph = get_homograph_performance(predictions, benchmark) * 100\n", "\n", " print(f\"PER: \\t\\t\\t{per:.4f}\")\n", " print(f\"HOMOGRAPH: \\t\\t{homograph:.4f}\")" ], "metadata": { "id": "DpSqE5oPbmAy" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# Epitran" ], "metadata": { "id": "k6XT11uMBnGp" } }, { "cell_type": "code", "source": [ "import epitran\n", "epi = epitran.Epitran('fas-Arab')" ], "metadata": { "id": "A53DAk2_Dakd" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "epi.transliterate(u'دلم میخواست برم ')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 35 }, "id": "L84ue_vWwdZl", "outputId": "06e6a744-7fd5-46d2-d0e2-48a2ef9dc133" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "'dlm mjxvɒst brm '" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 21 } ] }, { "cell_type": "code", "source": [ "replace_phonetic_characters(epi.transliterate(u'دلم میخواست برم '))" ], "metadata": { "id": "I_1WYcyaZyTR", "colab": { "base_uri": "https://localhost:8080/", "height": 35 }, "outputId": "927eb9fa-2bd3-44f2-8abd-92d90c9767af" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "'dlm mjxvAst brm '" ], 
"application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 22 } ] }, { "cell_type": "markdown", "source": [ "# outputs" ], "metadata": { "id": "NLgJTtoCg4m_" } }, { "cell_type": "code", "source": [ "from tqdm import tqdm\n", "import time\n", "\n", "outputs = []\n", "start_time = time.time()\n", "\n", "for g, p, _, _ in tqdm(benchmark):\n", " o = epi.transliterate(g)\n", " outputs.append(o)\n", "\n", "total_time = time.time() - start_time\n", "avg_time = total_time / len(benchmark) if len(benchmark) > 0 else 0\n", "print(f\"Total: {total_time:.2f}s | Avg: {avg_time:.4f}s/sample\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ECW_8Ja5g7FY", "outputId": "2c778f9b-7957-4b6f-9116-08681762d1e8" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 400/400 [00:00<00:00, 3625.70it/s]" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Total: 0.12s | Avg: 0.0003s/sample\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\n" ] } ] }, { "cell_type": "code", "source": [ "mapped_outputs = []\n", "for o in outputs:\n", " mapped = replace_phonetic_characters(o)\n", " mapped_outputs.append(mapped)\n", " mapped.replace('j', 'y')" ], "metadata": { "id": "K-catlB6Esuf" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "print_all_metrics(mapped_outputs)\n", "print(f\"TOTAL TIME:\\t\\t{total_time:.4f} (s)\")\n", "print(f\"AVG TIME:\\t\\t{avg_time:.4f} (s)\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "H2taHCPWCnls", "outputId": "c3e8950a-898b-45ea-bfb3-0ac9f384c296" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1184 (s)\n", "AVG TIME:\t\t0.0003 (s)\n" ] } ] }, { "cell_type": "markdown", "source": [ "# Runs\n", "\n", "## First:\n", "\n", "```\n", "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1172 (s)\n", "AVG TIME:\t\t0.0003 (s)\n", "```\n", "\n", "## Second\n", "\n", "```\n", "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1074 (s)\n", "AVG TIME:\t\t0.0003 (s)\n", "```\n", "\n", "## Third\n", "\n", "```\n", "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1296 (s)\n", "AVG TIME:\t\t0.0003 (s)\n", "```\n", "\n", "## Fourth\n", "\n", "```\n", "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1085 (s)\n", "AVG TIME:\t\t0.0003 (s)\n", "```\n", "\n", "## Fifth\n", "\n", "```\n", "PER: \t\t\t45.1223\n", "HOMOGRAPH: \t\t0.0000\n", "TOTAL TIME:\t\t0.1184 (s)\n", "AVG TIME:\t\t0.0003 (s)\n", "```" ], "metadata": { "id": "dq7_g71Wivog" } } ] }