{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "3526e83a-baa5-4278-81ce-e142e0a6d208", "metadata": { "tags": [] }, "outputs": [], "source": [ "import sys\n", "from pathlib import Path\n", "sys.path.append(Path('./').absolute().parent.__str__())\n", "from _datasets import AutoLoad" ] }, { "cell_type": "code", "execution_count": 48, "id": "5a0264f8-4b67-44e2-8aa9-468ae8b249b5", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(12, 15)\n", "{'a': 'b'}\n" ] } ], "source": [ "class Test():\n", " def __new__(cls, *args, **kwargs):\n", " print(args)\n", " print(kwargs)\n", "Test(12, 15, a='b')" ] }, { "cell_type": "code", "execution_count": 10, "id": "f0d8ead2-cfa6-4044-8e7a-6b7146bea9cd", "metadata": { "tags": [] }, "outputs": [], "source": [ "from transformers import T5TokenizerFast\n", "\n", "tokenizer = T5TokenizerFast.from_pretrained('google/t5-small-lm-adapt')\n", "tokenizer._is_seq2seq = True\n", "loader = AutoLoad(tokenizer=tokenizer)" ] }, { "cell_type": "code", "execution_count": 19, "id": "07c556fd-780d-4aee-a5e9-ad81a474d94b", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "['sentence1', 'sentence2']" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "loader.glue_helper.get_task_input('stsb')" ] }, { "cell_type": "code", "execution_count": 11, "id": "04feb162-ef3f-42a8-ab00-23d3faea5209", "metadata": { "tags": [] }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "8165afbb7bcb474e80b9538b0c0c39da", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/5749 [00:00>> super_glue_metric = evaluate.load('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n", " >>> predictions = [0, 1]\n", " >>> references = [0, 1]\n", " >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n", " >>> print(results)\n", " {'accuracy': 1.0}\n", "\n", " >>> super_glue_metric = evaluate.load('super_glue', 'cb')\n", " >>> predictions = [0, 1]\n", " >>> references = [0, 1]\n", " >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n", " >>> print(results)\n", " {'accuracy': 1.0, 'f1': 1.0}\n", "\n", " >>> super_glue_metric = evaluate.load('super_glue', 'record')\n", " >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n", " >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n", " >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n", " >>> print(results)\n", " {'exact_match': 1.0, 'f1': 1.0}\n", "\n", " >>> super_glue_metric = evaluate.load('super_glue', 'multirc')\n", " >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n", " >>> references = [0, 1]\n", " >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n", " >>> print(results)\n", " {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n", "\n", " >>> super_glue_metric = evaluate.load('super_glue', 'axb')\n", " >>> references = [0, 1]\n", " >>> predictions = [0, 1]\n", " >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n", " >>> print(results)\n", " {'matthews_correlation': 1.0}\n", "\"\"\", stored examples: 0)" ] }, "execution_count": 39, "metadata": {}, "output_type": 
"execute_result" } ], "source": [ "metric" ] }, { "cell_type": "code", "execution_count": 29, "id": "020f35a1-09ec-4ef3-94f4-28144778a3ab", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n", "0.1\n" ] } ], "source": [ "from transformers import T5ForConditionalGeneration\n", "import torch\n", "\n", "model = T5ForConditionalGeneration.from_pretrained('google/t5-small-lm-adapt')\n", "\n", "def mutate_remove_dropout(model):\n", " for module in model.modules():\n", " if isinstance(module, torch.nn.Dropout):\n", " module._backup_p = module.p\n", " module.p = 0\n", " print(module._backup_p)\n", "mutate_remove_dropout(model)" ] }, { "cell_type": "code", "execution_count": null, "id": "146e1eb3-f6a6-41d2-ab84-13b62de8983a", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python [conda env:deep]", "language": "python", "name": "conda-env-deep-py" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" } }, "nbformat": 4, "nbformat_minor": 5 }