{ "cells": [ { "cell_type": "markdown", "id": "896de91a-4ab9-40f5-a3c1-914535b6e0a7", "metadata": {}, "source": [ "# intro" ] }, { "cell_type": "code", "execution_count": 1, "id": "5f17aae6-73f5-4793-95a3-09147ea89e04", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Python version is: 3.10.11\n", "Scikit-learn version is: 1.2.2\n", "Torch version is: 1.13.1+cu117\n", "Nvidia device is: NVIDIA GeForce RTX 4090\n", "Transformers version is: 4.32.1\n", "Adapterhub not found!!!\n" ] } ], "source": [ "from typing import Optional\n", "\n", "import numpy as np\n", "from tqdm.notebook import tqdm\n", "\n", "import torch\n", "import torch.nn as nn\n", "from transformers import T5TokenizerFast, T5ForConditionalGeneration\n", "\n", "from _utils import print_system_info, generate_dataloader\n", "from _datasets import AutoLoad\n", "from _mydelta import T5Wrapper, auto_freeze\n", "from _trainer import train_loop, valid_loop\n", "\n", "print_system_info()" ] }, { "cell_type": "code", "execution_count": 2, "id": "fb5ef784-fef0-4b7b-98e7-ec5d3575a9a8", "metadata": { "tags": [] }, "outputs": [], "source": [ "from types import SimpleNamespace\n", "config = SimpleNamespace(\n", " model_name='google/t5-base-lm-adapt',\n", " n_tokens=30,\n", " n_layers=6,\n", " random_seed=42,\n", " task=['glue:cola'],\n", " hot_modules=['sadcl'],\n", " train_batch_size=32,\n", " valid_batch_size=32,\n", " balancify_sample=False,\n", " learning_rate=0.01,\n", " num_epochs=200\n", ")" ] }, { "cell_type": "code", "execution_count": 3, "id": "d3802d01-7c5a-4c11-beaf-f683a2fb9d80", "metadata": { "tags": [] }, "outputs": [], "source": [ "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "np.random.seed(config.random_seed)\n", "slected_tokens = torch.from_numpy(np.random.randint(0, 32128, size=(config.n_tokens,)))" ] }, { "cell_type": "markdown", "id": "1e785d49-beca-4333-986e-b198bbaadf7d", "metadata": {}, "source": [ "# 
Load model and data" ] }, { "cell_type": "code", "execution_count": 4, "id": "afcc6244-978a-425a-9fa9-8b11dd0df8ba", "metadata": { "tags": [] }, "outputs": [], "source": [ "model = T5ForConditionalGeneration.from_pretrained(config.model_name)\n", "tokenizer = T5TokenizerFast.from_pretrained(config.model_name, model_max_length=2048)" ] }, { "cell_type": "code", "execution_count": 5, "id": "894a8474-e2e1-4f9d-b9ab-58d911808ec0", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "encoder.block.6.soft_prompt.sadcl_learned_embedding\n", "encoder.block.7.soft_prompt.sadcl_learned_embedding\n", "encoder.block.8.soft_prompt.sadcl_learned_embedding\n", "encoder.block.9.soft_prompt.sadcl_learned_embedding\n", "encoder.block.10.soft_prompt.sadcl_learned_embedding\n", "encoder.block.11.soft_prompt.sadcl_learned_embedding\n" ] } ], "source": [ "delta_module = T5Wrapper.mutate(\n", " model=model,\n", " config=config,\n", " slected_tokens=slected_tokens\n", ")\n", "auto_freeze(model, config.hot_modules, verbose=True)" ] }, { "cell_type": "code", "execution_count": 15, "id": "9453d3cc-c04c-4a27-83aa-eaac3e49c14e", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shared.weight\n", "encoder.block.0.layer.0.SelfAttention.q.weight\n", "encoder.block.0.layer.0.SelfAttention.k.weight\n", "encoder.block.0.layer.0.SelfAttention.v.weight\n", "encoder.block.0.layer.0.SelfAttention.o.weight\n", "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight\n", "encoder.block.0.layer.0.layer_norm.weight\n", "encoder.block.0.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.0.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.0.layer.1.DenseReluDense.wo.weight\n", "encoder.block.0.layer.1.layer_norm.weight\n", "encoder.block.1.layer.0.SelfAttention.q.weight\n", "encoder.block.1.layer.0.SelfAttention.k.weight\n", "encoder.block.1.layer.0.SelfAttention.v.weight\n", 
"encoder.block.1.layer.0.SelfAttention.o.weight\n", "encoder.block.1.layer.0.layer_norm.weight\n", "encoder.block.1.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.1.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.1.layer.1.DenseReluDense.wo.weight\n", "encoder.block.1.layer.1.layer_norm.weight\n", "encoder.block.2.layer.0.SelfAttention.q.weight\n", "encoder.block.2.layer.0.SelfAttention.k.weight\n", "encoder.block.2.layer.0.SelfAttention.v.weight\n", "encoder.block.2.layer.0.SelfAttention.o.weight\n", "encoder.block.2.layer.0.layer_norm.weight\n", "encoder.block.2.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.2.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.2.layer.1.DenseReluDense.wo.weight\n", "encoder.block.2.layer.1.layer_norm.weight\n", "encoder.block.3.layer.0.SelfAttention.q.weight\n", "encoder.block.3.layer.0.SelfAttention.k.weight\n", "encoder.block.3.layer.0.SelfAttention.v.weight\n", "encoder.block.3.layer.0.SelfAttention.o.weight\n", "encoder.block.3.layer.0.layer_norm.weight\n", "encoder.block.3.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.3.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.3.layer.1.DenseReluDense.wo.weight\n", "encoder.block.3.layer.1.layer_norm.weight\n", "encoder.block.4.layer.0.SelfAttention.q.weight\n", "encoder.block.4.layer.0.SelfAttention.k.weight\n", "encoder.block.4.layer.0.SelfAttention.v.weight\n", "encoder.block.4.layer.0.SelfAttention.o.weight\n", "encoder.block.4.layer.0.layer_norm.weight\n", "encoder.block.4.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.4.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.4.layer.1.DenseReluDense.wo.weight\n", "encoder.block.4.layer.1.layer_norm.weight\n", "encoder.block.5.layer.0.SelfAttention.q.weight\n", "encoder.block.5.layer.0.SelfAttention.k.weight\n", "encoder.block.5.layer.0.SelfAttention.v.weight\n", "encoder.block.5.layer.0.SelfAttention.o.weight\n", "encoder.block.5.layer.0.layer_norm.weight\n", 
"encoder.block.5.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.5.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.5.layer.1.DenseReluDense.wo.weight\n", "encoder.block.5.layer.1.layer_norm.weight\n", "encoder.block.6.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.6.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.6.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.6.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.6.original_module.layer.0.layer_norm.weight\n", "encoder.block.6.original_module.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.6.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.6.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.6.original_module.layer.1.layer_norm.weight\n", "encoder.block.6.soft_prompt.sadcl_learned_embedding\n", "encoder.block.7.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.7.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.7.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.7.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.7.original_module.layer.0.layer_norm.weight\n", "encoder.block.7.original_module.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.7.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.7.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.7.original_module.layer.1.layer_norm.weight\n", "encoder.block.7.soft_prompt.sadcl_learned_embedding\n", "encoder.block.8.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.8.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.8.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.8.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.8.original_module.layer.0.layer_norm.weight\n", "encoder.block.8.original_module.layer.1.DenseReluDense.wi_0.weight\n", 
"encoder.block.8.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.8.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.8.original_module.layer.1.layer_norm.weight\n", "encoder.block.8.soft_prompt.sadcl_learned_embedding\n", "encoder.block.9.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.9.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.9.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.9.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.9.original_module.layer.0.layer_norm.weight\n", "encoder.block.9.original_module.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.9.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.9.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.9.original_module.layer.1.layer_norm.weight\n", "encoder.block.9.soft_prompt.sadcl_learned_embedding\n", "encoder.block.10.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.10.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.10.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.10.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.10.original_module.layer.0.layer_norm.weight\n", "encoder.block.10.original_module.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.10.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.10.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.10.original_module.layer.1.layer_norm.weight\n", "encoder.block.10.soft_prompt.sadcl_learned_embedding\n", "encoder.block.11.original_module.layer.0.SelfAttention.q.weight\n", "encoder.block.11.original_module.layer.0.SelfAttention.k.weight\n", "encoder.block.11.original_module.layer.0.SelfAttention.v.weight\n", "encoder.block.11.original_module.layer.0.SelfAttention.o.weight\n", "encoder.block.11.original_module.layer.0.layer_norm.weight\n", 
"encoder.block.11.original_module.layer.1.DenseReluDense.wi_0.weight\n", "encoder.block.11.original_module.layer.1.DenseReluDense.wi_1.weight\n", "encoder.block.11.original_module.layer.1.DenseReluDense.wo.weight\n", "encoder.block.11.original_module.layer.1.layer_norm.weight\n", "encoder.block.11.soft_prompt.sadcl_learned_embedding\n", "encoder.final_layer_norm.weight\n", "decoder.block.0.layer.0.SelfAttention.q.weight\n", "decoder.block.0.layer.0.SelfAttention.k.weight\n", "decoder.block.0.layer.0.SelfAttention.v.weight\n", "decoder.block.0.layer.0.SelfAttention.o.weight\n", "decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight\n", "decoder.block.0.layer.0.layer_norm.weight\n", "decoder.block.0.layer.1.EncDecAttention.q.weight\n", "decoder.block.0.layer.1.EncDecAttention.k.weight\n", "decoder.block.0.layer.1.EncDecAttention.v.weight\n", "decoder.block.0.layer.1.EncDecAttention.o.weight\n", "decoder.block.0.layer.1.layer_norm.weight\n", "decoder.block.0.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.0.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.0.layer.2.DenseReluDense.wo.weight\n", "decoder.block.0.layer.2.layer_norm.weight\n", "decoder.block.1.layer.0.SelfAttention.q.weight\n", "decoder.block.1.layer.0.SelfAttention.k.weight\n", "decoder.block.1.layer.0.SelfAttention.v.weight\n", "decoder.block.1.layer.0.SelfAttention.o.weight\n", "decoder.block.1.layer.0.layer_norm.weight\n", "decoder.block.1.layer.1.EncDecAttention.q.weight\n", "decoder.block.1.layer.1.EncDecAttention.k.weight\n", "decoder.block.1.layer.1.EncDecAttention.v.weight\n", "decoder.block.1.layer.1.EncDecAttention.o.weight\n", "decoder.block.1.layer.1.layer_norm.weight\n", "decoder.block.1.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.1.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.1.layer.2.DenseReluDense.wo.weight\n", "decoder.block.1.layer.2.layer_norm.weight\n", "decoder.block.2.layer.0.SelfAttention.q.weight\n", 
"decoder.block.2.layer.0.SelfAttention.k.weight\n", "decoder.block.2.layer.0.SelfAttention.v.weight\n", "decoder.block.2.layer.0.SelfAttention.o.weight\n", "decoder.block.2.layer.0.layer_norm.weight\n", "decoder.block.2.layer.1.EncDecAttention.q.weight\n", "decoder.block.2.layer.1.EncDecAttention.k.weight\n", "decoder.block.2.layer.1.EncDecAttention.v.weight\n", "decoder.block.2.layer.1.EncDecAttention.o.weight\n", "decoder.block.2.layer.1.layer_norm.weight\n", "decoder.block.2.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.2.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.2.layer.2.DenseReluDense.wo.weight\n", "decoder.block.2.layer.2.layer_norm.weight\n", "decoder.block.3.layer.0.SelfAttention.q.weight\n", "decoder.block.3.layer.0.SelfAttention.k.weight\n", "decoder.block.3.layer.0.SelfAttention.v.weight\n", "decoder.block.3.layer.0.SelfAttention.o.weight\n", "decoder.block.3.layer.0.layer_norm.weight\n", "decoder.block.3.layer.1.EncDecAttention.q.weight\n", "decoder.block.3.layer.1.EncDecAttention.k.weight\n", "decoder.block.3.layer.1.EncDecAttention.v.weight\n", "decoder.block.3.layer.1.EncDecAttention.o.weight\n", "decoder.block.3.layer.1.layer_norm.weight\n", "decoder.block.3.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.3.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.3.layer.2.DenseReluDense.wo.weight\n", "decoder.block.3.layer.2.layer_norm.weight\n", "decoder.block.4.layer.0.SelfAttention.q.weight\n", "decoder.block.4.layer.0.SelfAttention.k.weight\n", "decoder.block.4.layer.0.SelfAttention.v.weight\n", "decoder.block.4.layer.0.SelfAttention.o.weight\n", "decoder.block.4.layer.0.layer_norm.weight\n", "decoder.block.4.layer.1.EncDecAttention.q.weight\n", "decoder.block.4.layer.1.EncDecAttention.k.weight\n", "decoder.block.4.layer.1.EncDecAttention.v.weight\n", "decoder.block.4.layer.1.EncDecAttention.o.weight\n", "decoder.block.4.layer.1.layer_norm.weight\n", "decoder.block.4.layer.2.DenseReluDense.wi_0.weight\n", 
"decoder.block.4.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.4.layer.2.DenseReluDense.wo.weight\n", "decoder.block.4.layer.2.layer_norm.weight\n", "decoder.block.5.layer.0.SelfAttention.q.weight\n", "decoder.block.5.layer.0.SelfAttention.k.weight\n", "decoder.block.5.layer.0.SelfAttention.v.weight\n", "decoder.block.5.layer.0.SelfAttention.o.weight\n", "decoder.block.5.layer.0.layer_norm.weight\n", "decoder.block.5.layer.1.EncDecAttention.q.weight\n", "decoder.block.5.layer.1.EncDecAttention.k.weight\n", "decoder.block.5.layer.1.EncDecAttention.v.weight\n", "decoder.block.5.layer.1.EncDecAttention.o.weight\n", "decoder.block.5.layer.1.layer_norm.weight\n", "decoder.block.5.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.5.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.5.layer.2.DenseReluDense.wo.weight\n", "decoder.block.5.layer.2.layer_norm.weight\n", "decoder.block.6.layer.0.SelfAttention.q.weight\n", "decoder.block.6.layer.0.SelfAttention.k.weight\n", "decoder.block.6.layer.0.SelfAttention.v.weight\n", "decoder.block.6.layer.0.SelfAttention.o.weight\n", "decoder.block.6.layer.0.layer_norm.weight\n", "decoder.block.6.layer.1.EncDecAttention.q.weight\n", "decoder.block.6.layer.1.EncDecAttention.k.weight\n", "decoder.block.6.layer.1.EncDecAttention.v.weight\n", "decoder.block.6.layer.1.EncDecAttention.o.weight\n", "decoder.block.6.layer.1.layer_norm.weight\n", "decoder.block.6.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.6.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.6.layer.2.DenseReluDense.wo.weight\n", "decoder.block.6.layer.2.layer_norm.weight\n", "decoder.block.7.layer.0.SelfAttention.q.weight\n", "decoder.block.7.layer.0.SelfAttention.k.weight\n", "decoder.block.7.layer.0.SelfAttention.v.weight\n", "decoder.block.7.layer.0.SelfAttention.o.weight\n", "decoder.block.7.layer.0.layer_norm.weight\n", "decoder.block.7.layer.1.EncDecAttention.q.weight\n", "decoder.block.7.layer.1.EncDecAttention.k.weight\n", 
"decoder.block.7.layer.1.EncDecAttention.v.weight\n", "decoder.block.7.layer.1.EncDecAttention.o.weight\n", "decoder.block.7.layer.1.layer_norm.weight\n", "decoder.block.7.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.7.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.7.layer.2.DenseReluDense.wo.weight\n", "decoder.block.7.layer.2.layer_norm.weight\n", "decoder.block.8.layer.0.SelfAttention.q.weight\n", "decoder.block.8.layer.0.SelfAttention.k.weight\n", "decoder.block.8.layer.0.SelfAttention.v.weight\n", "decoder.block.8.layer.0.SelfAttention.o.weight\n", "decoder.block.8.layer.0.layer_norm.weight\n", "decoder.block.8.layer.1.EncDecAttention.q.weight\n", "decoder.block.8.layer.1.EncDecAttention.k.weight\n", "decoder.block.8.layer.1.EncDecAttention.v.weight\n", "decoder.block.8.layer.1.EncDecAttention.o.weight\n", "decoder.block.8.layer.1.layer_norm.weight\n", "decoder.block.8.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.8.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.8.layer.2.DenseReluDense.wo.weight\n", "decoder.block.8.layer.2.layer_norm.weight\n", "decoder.block.9.layer.0.SelfAttention.q.weight\n", "decoder.block.9.layer.0.SelfAttention.k.weight\n", "decoder.block.9.layer.0.SelfAttention.v.weight\n", "decoder.block.9.layer.0.SelfAttention.o.weight\n", "decoder.block.9.layer.0.layer_norm.weight\n", "decoder.block.9.layer.1.EncDecAttention.q.weight\n", "decoder.block.9.layer.1.EncDecAttention.k.weight\n", "decoder.block.9.layer.1.EncDecAttention.v.weight\n", "decoder.block.9.layer.1.EncDecAttention.o.weight\n", "decoder.block.9.layer.1.layer_norm.weight\n", "decoder.block.9.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.9.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.9.layer.2.DenseReluDense.wo.weight\n", "decoder.block.9.layer.2.layer_norm.weight\n", "decoder.block.10.layer.0.SelfAttention.q.weight\n", "decoder.block.10.layer.0.SelfAttention.k.weight\n", "decoder.block.10.layer.0.SelfAttention.v.weight\n", 
"decoder.block.10.layer.0.SelfAttention.o.weight\n", "decoder.block.10.layer.0.layer_norm.weight\n", "decoder.block.10.layer.1.EncDecAttention.q.weight\n", "decoder.block.10.layer.1.EncDecAttention.k.weight\n", "decoder.block.10.layer.1.EncDecAttention.v.weight\n", "decoder.block.10.layer.1.EncDecAttention.o.weight\n", "decoder.block.10.layer.1.layer_norm.weight\n", "decoder.block.10.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.10.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.10.layer.2.DenseReluDense.wo.weight\n", "decoder.block.10.layer.2.layer_norm.weight\n", "decoder.block.11.layer.0.SelfAttention.q.weight\n", "decoder.block.11.layer.0.SelfAttention.k.weight\n", "decoder.block.11.layer.0.SelfAttention.v.weight\n", "decoder.block.11.layer.0.SelfAttention.o.weight\n", "decoder.block.11.layer.0.layer_norm.weight\n", "decoder.block.11.layer.1.EncDecAttention.q.weight\n", "decoder.block.11.layer.1.EncDecAttention.k.weight\n", "decoder.block.11.layer.1.EncDecAttention.v.weight\n", "decoder.block.11.layer.1.EncDecAttention.o.weight\n", "decoder.block.11.layer.1.layer_norm.weight\n", "decoder.block.11.layer.2.DenseReluDense.wi_0.weight\n", "decoder.block.11.layer.2.DenseReluDense.wi_1.weight\n", "decoder.block.11.layer.2.DenseReluDense.wo.weight\n", "decoder.block.11.layer.2.layer_norm.weight\n", "decoder.final_layer_norm.weight\n", "lm_head.weight\n" ] } ], "source": [ "for x, y in model.named_parameters():\n", " print(x)" ] }, { "cell_type": "code", "execution_count": 6, "id": "4a34e1f1-1fc1-4577-a87d-efeac33894b1", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset glue (/home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "22d7491179634c75ab8a5c70e9e4188f", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00