{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "bce6d2a3-c3df-46f9-926e-2dda07dc9a3d", "metadata": { "tags": [] }, "outputs": [], "source": [ "from types import SimpleNamespace\n", "from typing import Optional\n", "\n", "import torch\n", "import torch.nn as nn" ] }, { "cell_type": "code", "execution_count": 2, "id": "5095bac0-f9ef-4aee-8050-acab81ee0d6f", "metadata": { "tags": [] }, "outputs": [], "source": [ "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "MODEL_NAME = 'bert-base-uncased'\n", "NAMESPACE = 'sadcl'\n", "\n", "NTOKENS = 10\n", "PROMPT_PLACE = 'post' # pre" ] }, { "cell_type": "code", "execution_count": 6, "id": "ad41bd6e-d7f5-4c4b-a4fd-de039bb9b8c7", "metadata": { "tags": [] }, "outputs": [], "source": [ "def initialize_embedding(\n", " emb_dim: int,\n", " n_tokens: int, \n", " random_range: float,\n", " initialize_from: Optional[torch.Tensor] = None\n", "):\n", " if initialize_from is None:\n", " return torch.FloatTensor(n_tokens, emb_dim).uniform_(-random_range, random_range)\n", "\n", " assert initialize_from.shape == (n_tokens, )\n", "\n", " return initialize_from.clone().detach().tile(1, emb_dim)\n", "\n", "class SoftEmbedding(nn.Module):\n", " def __init__(\n", " self,\n", " emb_dim: int,\n", " n_tokens: int, \n", " random_range: float = 0.5,\n", " prompt_place: str = 'post',\n", " mode: str = 'cat',\n", " initialize_from: Optional[torch.Tensor] = None\n", " ):\n", " super().__init__()\n", " assert mode in ['cat', 'add']\n", " assert prompt_place in ['pre', 'post']\n", " \n", " self.post_tokenizer_map = {\n", " 'input_ids': 0,\n", " 'attention_mask': 1,\n", " 'token_type_ids': 0\n", " }\n", " self.n_tokens = n_tokens\n", " self.mode = mode\n", " self.prompt_place = prompt_place\n", " \n", " self.sadcl_learned_embedding = nn.parameter.Parameter(\n", " initialize_embedding(\n", " emb_dim,\n", " n_tokens,\n", " random_range,\n", " initialize_from\n", " )\n", " )\n", "\n", " assert self.sadcl_learned_embedding.shape == (n_tokens, emb_dim)\n", " \n", " def forward(self, input_embedding):\n", " # input_embedding.shape = (batch_size, num_of_input_tokens, emb_dim)\n", " batch_size = input_embedding.size(0)\n", " if self.mode == 'cat':\n", " learned_embedding = self.sadcl_learned_embedding.repeat(batch_size, 1, 1) # (batch_size, n_tokens, emb_dim)\n", " return self.concat_batch(input_embedding[self.get_slice_for_cat()], learned_embedding)\n", " else: # mode == add\n", " input_embedding[self.get_slice_for_add()] += self.sadcl_learned_embedding[None, :, :]\n", " return input_embedding\n", " \n", " def get_weights(self):\n", " return self.sadcl_learned_embedding.detach().clone()\n", " \n", " def set_weights(self, new_weights: torch.Tensor):\n", " self.sadcl_learned_embedding.data = new_weights\n", " \n", " def get_slice_for_add(self):\n", " if self.prompt_place == 'pre':\n", " return slice(None), slice(None, self.n_tokens), slice(None)\n", " else: # prompt_place == post\n", " return slice(None), slice(-self.n_tokens, None), slice(None)\n", " \n", " def get_slice_for_cat(self):\n", " if self.prompt_place == 'pre':\n", " return slice(None), slice(self.n_tokens, None), slice(None)\n", " else: # prompt_place == post\n", " return slice(None), slice(None, -self.n_tokens), slice(None)\n", " \n", " def concat_batch(self, orig_vals, new_vals):\n", " if self.prompt_place == 'pre':\n", " return torch.cat([new_vals, orig_vals], axis=1)\n", " else: # prompt_place == post\n", " return torch.cat([orig_vals, new_vals], axis=1)\n", " \n", " def post_tokenizer(self, **kwargs):\n", " for special_key, pad_val in self.post_tokenizer_map.items():\n", " if special_key in kwargs:\n", " orig_tokens = kwargs[special_key]\n", " batch_size = kwargs[special_key].size(0)\n", " new_vals = torch.full(\n", " size=(batch_size, self.n_tokens),\n", " fill_value=pad_val,\n", " dtype=orig_tokens.dtype,\n", " device=orig_tokens.device\n", " )\n", " kwargs[special_key].data = self.concat_batch(orig_tokens, new_vals)\n", " return kwargs\n", "\n", "class TransformerInjector(nn.Module):\n", " def __init__(self, module):\n", " super().__init__()\n", " self.original_module = module\n", " self.add_prompt = SoftEmbedding(\n", " emb_dim=module.output.dense.out_features,\n", " n_tokens=NTOKENS,\n", " prompt_place=PROMPT_PLACE,\n", " mode='add'\n", " )\n", " \n", " def forward(self, hidden_states, *args, **kwargs):\n", " hidden_states = self.add_prompt(hidden_states)\n", " return self.original_module(hidden_states, *args, **kwargs)\n", " \n", " @classmethod\n", " def muatate_list(cls, module_list):\n", " for idx, module in enumerate(module_list):\n", " module_list[idx] = cls(module)\n", " return module_list\n", " \n", "class NewEmbeddingLayer(nn.Module):\n", " def __init__(self, emb_layer=nn.Embedding):\n", " super().__init__()\n", " self.emb_layer = emb_layer\n", " self.soft_prompt = SoftEmbedding(\n", " emb_dim=emb_layer.weight.size(1),\n", " n_tokens=NTOKENS,\n", " prompt_place=PROMPT_PLACE\n", " )\n", " \n", " def forward(self, tokens):\n", " out = self.emb_layer(tokens)\n", " out = self.soft_prompt(out)\n", " return out\n", " \n", " def get_weights(self):\n", " return self.soft_prompt.get_weights()\n", " \n", " def set_weights(self, new_weights):\n", " self.soft_prompt.set_weights(new_weights)\n", " \n", " @classmethod\n", " def mutate(cls, model):\n", " emb_layer = model.get_input_embeddings()\n", " new_emb_layer = cls(emb_layer)\n", " model.set_input_embeddings(new_emb_layer)\n", " \n", " orig_forward = model.forward\n", " \n", " def new_forward(**kwargs):\n", " new_kwargs = new_emb_layer.soft_prompt.post_tokenizer(**kwargs)\n", " return orig_forward(**new_kwargs)\n", " \n", " model.forward = new_forward\n", " return new_emb_layer" ] }, { "cell_type": "code", "execution_count": 7, "id": "79bf6687-5a88-4181-88dc-740d11dd89ac", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "from transformers import BertForSequenceClassification, BertTokenizerFast\n", "\n", "model = BertForSequenceClassification.from_pretrained(MODEL_NAME)\n", "tokenizer = BertTokenizerFast.from_pretrained(MODEL_NAME)\n", "\n", "peft_module = NewEmbeddingLayer.mutate(model)\n", "peft_bert_layers = TransformerInjector.muatate_list(model.bert.encoder.layer)\n", "\n", "model.to(DEVICE);" ] }, { "cell_type": "code", "execution_count": 23, "id": "c0b0a48e-0b0b-43de-ae78-d2521cfee69e", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "tensor([[-0.2546, -0.0352, -0.4110, ..., 0.0189, 0.4121, 0.2206],\n", " [ 0.0670, 0.0600, 0.4493, ..., -0.4346, 0.4130, -0.3507],\n", " [ 0.0827, 0.3569, 0.0943, ..., -0.3451, -0.1879, 0.0831],\n", " ...,\n", " [-0.0489, -0.2570, -0.3328, ..., -0.4109, 0.0884, -0.0290],\n", " [-0.2705, -0.3854, 0.4559, ..., -0.0480, -0.4039, 0.4245],\n", " [-0.1941, 0.2237, 0.3494, ..., -0.1199, -0.3030, -0.1530]],\n", " device='cuda:0')" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "old_w = peft_module.get_weights()\n", "old_w" ] }, { "cell_type": "code", "execution_count": 24, "id": "d3753569-c95f-4f8e-99ec-e6f990ec55a8", "metadata": { "tags": [] }, "outputs": [], "source": [ "# tokens = tokenizer(\"Hi bye\", return_tensors='pt').to(DEVICE)\n", "\n", "# model.eval()\n", "# with torch.no_grad():\n", "# out = model(**tokens)\n", "# out" ] }, { "cell_type": "code", "execution_count": 3, "id": "23b6f5b1-bbb7-43b9-b5a9-e62d313f4244", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset glue (/home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "e2bc8f1df0934619941ae8e37e2be807", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from _datasets import AutoLoad\n", "autoload = AutoLoad()" ] }, { "cell_type": "code", "execution_count": 26, "id": "45cb37be-9aee-45f6-8a8a-bf859197a7d4", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "bert.embeddings.word_embeddings.soft_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.0.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.1.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.2.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.3.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.4.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.5.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.6.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.7.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.8.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.9.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.10.add_prompt.sadcl_learned_embedding\n", "bert.encoder.layer.11.add_prompt.sadcl_learned_embedding\n", "classifier.weight\n", "classifier.bias\n" ] } ], "source": [ "for param_name, weights in model.named_parameters():\n", " if 'classifier' in param_name or NAMESPACE in param_name:\n", " weights.requires_grad = True\n", " print(param_name)\n", " else:\n", " weights.requires_grad = False" ] }, { "cell_type": "code", "execution_count": 8, "id": "47f78a61-710f-410d-8f49-19da12eef09a", "metadata": { "tags": [] }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/8551 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/1043 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/1063 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "loader_out = autoload.get_and_map(tokenizer, \"glue:cola\")\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "8d75737f-e5c6-4dc1-94b9-8aaa507648e2", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "{'train': Dataset({\n", " features: ['sentence', 'label', 'idx', 'input_ids', 'token_type_ids', 'attention_mask'],\n", " num_rows: 8551\n", " }),\n", " 'valid': Dataset({\n", " features: ['sentence', 'label', 'idx', 'input_ids', 'token_type_ids', 'attention_mask'],\n", " num_rows: 1043\n", " }),\n", " 'output': {'kind': 'classification', 'range': {0, 1}}}" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "loader_out" ] }, { "cell_type": "code", "execution_count": 28, "id": "2489364c-4d8d-4d69-8d52-7ac88d66e7f8", "metadata": { "tags": [] }, "outputs": [], "source": [ "from config import load_config\n", "config = load_config('config.yaml')" ] }, { "cell_type": "code", "execution_count": 29, "id": "67e68e28-b4d0-42fd-a7e7-b1321485fc78", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-41a6799222324b5f.arrow\n", "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-9fc7d7deaf3161a2.arrow\n", "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-0eb862d54758b38d.arrow\n", "You're using a BertTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n" ] }, { "data": { "text/html": [ "\n", "
Epoch | \n", "Training Loss | \n", "Validation Loss | \n", "Accuracy | \n", "F1-score-1 | \n", "F1-score-ma | \n", "
---|---|---|---|---|---|
1 | \n", "No log | \n", "0.655867 | \n", "0.691275 | \n", "0.817460 | \n", "0.408730 | \n", "
2 | \n", "0.577800 | \n", "0.639771 | \n", "0.763183 | \n", "0.848930 | \n", "0.650629 | \n", "
3 | \n", "0.577800 | \n", "0.507809 | \n", "0.766059 | \n", "0.849197 | \n", "0.663915 | \n", "
4 | \n", "0.528700 | \n", "0.523820 | \n", "0.770853 | \n", "0.852195 | \n", "0.671300 | \n", "
5 | \n", "0.528700 | \n", "0.480276 | \n", "0.794823 | \n", "0.861757 | \n", "0.731994 | \n", "
6 | \n", "0.499800 | \n", "0.506056 | \n", "0.776606 | \n", "0.855906 | \n", "0.679552 | \n", "
7 | \n", "0.499800 | \n", "0.475724 | \n", "0.795781 | \n", "0.863198 | \n", "0.730276 | \n", "
8 | \n", "0.482900 | \n", "0.494971 | \n", "0.790988 | \n", "0.860614 | \n", "0.721495 | \n", "
9 | \n", "0.482900 | \n", "0.478771 | \n", "0.786194 | \n", "0.858592 | \n", "0.710239 | \n", "
10 | \n", "0.465700 | \n", "0.502414 | \n", "0.780441 | \n", "0.858903 | \n", "0.682151 | \n", "
11 | \n", "0.465700 | \n", "0.498116 | \n", "0.794823 | \n", "0.866584 | \n", "0.711300 | \n", "
12 | \n", "0.461800 | \n", "0.537117 | \n", "0.780441 | \n", "0.860281 | \n", "0.673988 | \n", "
13 | \n", "0.461800 | \n", "0.465851 | \n", "0.802493 | \n", "0.868286 | \n", "0.736825 | \n", "
14 | \n", "0.445000 | \n", "0.487390 | \n", "0.795781 | \n", "0.865953 | \n", "0.718691 | \n", "
15 | \n", "0.435600 | \n", "0.440423 | \n", "0.801534 | \n", "0.864440 | \n", "0.747068 | \n", "
16 | \n", "0.435600 | \n", "0.483897 | \n", "0.803452 | \n", "0.869344 | \n", "0.736413 | \n", "
17 | \n", "0.423500 | \n", "0.461727 | \n", "0.806328 | \n", "0.872152 | \n", "0.736471 | \n", "
18 | \n", "0.423500 | \n", "0.491034 | \n", "0.794823 | \n", "0.865915 | \n", "0.714590 | \n", "
19 | \n", "0.410400 | \n", "0.451404 | \n", "0.806328 | \n", "0.868490 | \n", "0.750608 | \n", "
20 | \n", "0.410400 | \n", "0.439862 | \n", "0.808245 | \n", "0.872611 | \n", "0.742507 | \n", "
21 | \n", "0.408100 | \n", "0.443258 | \n", "0.794823 | \n", "0.865915 | \n", "0.714590 | \n", "
22 | \n", "0.408100 | \n", "0.450756 | \n", "0.805369 | \n", "0.871438 | \n", "0.735522 | \n", "
23 | \n", "0.404600 | \n", "0.483001 | \n", "0.797699 | \n", "0.867379 | \n", "0.720558 | \n", "
24 | \n", "0.404600 | \n", "0.481094 | \n", "0.794823 | \n", "0.866417 | \n", "0.712134 | \n", "
25 | \n", "0.397200 | \n", "0.509731 | \n", "0.798658 | \n", "0.867925 | \n", "0.722269 | \n", "
26 | \n", "0.397200 | \n", "0.468457 | \n", "0.813998 | \n", "0.872870 | \n", "0.763221 | \n", "
27 | \n", "0.388100 | \n", "0.450646 | \n", "0.802493 | \n", "0.869785 | \n", "0.730527 | \n", "
28 | \n", "0.379900 | \n", "0.518912 | \n", "0.800575 | \n", "0.868852 | \n", "0.726426 | \n", "
29 | \n", "0.379900 | \n", "0.474939 | \n", "0.803452 | \n", "0.870988 | \n", "0.729257 | \n", "
30 | \n", "0.375800 | \n", "0.468194 | \n", "0.799616 | \n", "0.868636 | \n", "0.723207 | \n", "
31 | \n", "0.375800 | \n", "0.447116 | \n", "0.810163 | \n", "0.872423 | \n", "0.750818 | \n", "
32 | \n", "0.370700 | \n", "0.537091 | \n", "0.802493 | \n", "0.870113 | \n", "0.729057 | \n", "
33 | \n", "0.370700 | \n", "0.475261 | \n", "0.807287 | \n", "0.871071 | \n", "0.744834 | \n", "
34 | \n", "0.367900 | \n", "0.487207 | \n", "0.802493 | \n", "0.870603 | \n", "0.726799 | \n", "
35 | \n", "0.367900 | \n", "0.437785 | \n", "0.806328 | \n", "0.871338 | \n", "0.739932 | \n", "
36 | \n", "0.358800 | \n", "0.508899 | \n", "0.808245 | \n", "0.872774 | \n", "0.741834 | \n", "
37 | \n", "0.358800 | \n", "0.552409 | \n", "0.800575 | \n", "0.869347 | \n", "0.724147 | \n", "
38 | \n", "0.355700 | \n", "0.496687 | \n", "0.802493 | \n", "0.871571 | \n", "0.722093 | \n", "
39 | \n", "0.355700 | \n", "0.504841 | \n", "0.816874 | \n", "0.875570 | \n", "0.764464 | \n", "
40 | \n", "0.345500 | \n", "0.483254 | \n", "0.790988 | \n", "0.865929 | \n", "0.696008 | \n", "
41 | \n", "0.345500 | \n", "0.512504 | \n", "0.796740 | \n", "0.868323 | \n", "0.711472 | \n", "
42 | \n", "0.351700 | \n", "0.497110 | \n", "0.800575 | \n", "0.870486 | \n", "0.718576 | \n", "
43 | \n", "0.339900 | \n", "0.471216 | \n", "0.798658 | \n", "0.867758 | \n", "0.723036 | \n", "
44 | \n", "0.339900 | \n", "0.531487 | \n", "0.805369 | \n", "0.870783 | \n", "0.738304 | \n", "
45 | \n", "0.341300 | \n", "0.540843 | \n", "0.807287 | \n", "0.870740 | \n", "0.746104 | \n", "
46 | \n", "0.341300 | \n", "0.476809 | \n", "0.803452 | \n", "0.869841 | \n", "0.734334 | \n", "
47 | \n", "0.337400 | \n", "0.479455 | \n", "0.819751 | \n", "0.877124 | \n", "0.769497 | \n", "
48 | \n", "0.337400 | \n", "0.446018 | \n", "0.815916 | \n", "0.875163 | \n", "0.762399 | \n", "
49 | \n", "0.334200 | \n", "0.548959 | \n", "0.813039 | \n", "0.875080 | \n", "0.751826 | \n", "
50 | \n", "0.334200 | \n", "0.500371 | \n", "0.797699 | \n", "0.867379 | \n", "0.720558 | \n", "
51 | \n", "0.331700 | \n", "0.503151 | \n", "0.808245 | \n", "0.871134 | \n", "0.748301 | \n", "
52 | \n", "0.331700 | \n", "0.556216 | \n", "0.798658 | \n", "0.868586 | \n", "0.719129 | \n", "
53 | \n", "0.326700 | \n", "0.478857 | \n", "0.816874 | \n", "0.875245 | \n", "0.765550 | \n", "
54 | \n", "0.326700 | \n", "0.508674 | \n", "0.806328 | \n", "0.870347 | \n", "0.743885 | \n", "
55 | \n", "0.326700 | \n", "0.510241 | \n", "0.807287 | \n", "0.870740 | \n", "0.746104 | \n", "
56 | \n", "0.327800 | \n", "0.510437 | \n", "0.803452 | \n", "0.870335 | \n", "0.732197 | \n", "
57 | \n", "0.327800 | \n", "0.516560 | \n", "0.804410 | \n", "0.871212 | \n", "0.732419 | \n", "
58 | \n", "0.320600 | \n", "0.482175 | \n", "0.810163 | \n", "0.872258 | \n", "0.751428 | \n", "
59 | \n", "0.320600 | \n", "0.534551 | \n", "0.809204 | \n", "0.870527 | \n", "0.754025 | \n", "
60 | \n", "0.311600 | \n", "0.529513 | \n", "0.804410 | \n", "0.869063 | \n", "0.741350 | \n", "
61 | \n", "0.311600 | \n", "0.529038 | \n", "0.812081 | \n", "0.872892 | \n", "0.756299 | \n", "
62 | \n", "0.317900 | \n", "0.551885 | \n", "0.797699 | \n", "0.866032 | \n", "0.726558 | \n", "
63 | \n", "0.317900 | \n", "0.500419 | \n", "0.808245 | \n", "0.870968 | \n", "0.748917 | \n", "
64 | \n", "0.315100 | \n", "0.466086 | \n", "0.809204 | \n", "0.871861 | \n", "0.749251 | \n", "
65 | \n", "0.315100 | \n", "0.492729 | \n", "0.811122 | \n", "0.872821 | \n", "0.752984 | \n", "
66 | \n", "0.306300 | \n", "0.463267 | \n", "0.813998 | \n", "0.874352 | \n", "0.758209 | \n", "
67 | \n", "0.306300 | \n", "0.568536 | \n", "0.811122 | \n", "0.872821 | \n", "0.752984 | \n", "
68 | \n", "0.308500 | \n", "0.539011 | \n", "0.803452 | \n", "0.868167 | \n", "0.741052 | \n", "
69 | \n", "0.308500 | \n", "0.526197 | \n", "0.808245 | \n", "0.871300 | \n", "0.747680 | \n", "
70 | \n", "0.304900 | \n", "0.506041 | \n", "0.811122 | \n", "0.872657 | \n", "0.753583 | \n", "
71 | \n", "0.302700 | \n", "0.581929 | \n", "0.798658 | \n", "0.866751 | \n", "0.727493 | \n", "
72 | \n", "0.302700 | \n", "0.516497 | \n", "0.810163 | \n", "0.872258 | \n", "0.751428 | \n", "
73 | \n", "0.308000 | \n", "0.507128 | \n", "0.807287 | \n", "0.870239 | \n", "0.747969 | \n", "
74 | \n", "0.308000 | \n", "0.520996 | \n", "0.803452 | \n", "0.868167 | \n", "0.741052 | \n", "
75 | \n", "0.304900 | \n", "0.517548 | \n", "0.806328 | \n", "0.869677 | \n", "0.746406 | \n", "
76 | \n", "0.304900 | \n", "0.503817 | \n", "0.804410 | \n", "0.868726 | \n", "0.742634 | \n", "
77 | \n", "0.298100 | \n", "0.508880 | \n", "0.809204 | \n", "0.871530 | \n", "0.750476 | \n", "
78 | \n", "0.298100 | \n", "0.505606 | \n", "0.808245 | \n", "0.870801 | \n", "0.749527 | \n", "
79 | \n", "0.304900 | \n", "0.526573 | \n", "0.802493 | \n", "0.867609 | \n", "0.739465 | \n", "
80 | \n", "0.304900 | \n", "0.523581 | \n", "0.804410 | \n", "0.868726 | \n", "0.742634 | \n", "
"
],
"text/plain": [
"