{ "cells": [ { "cell_type": "markdown", "id": "19c25879-e13a-4f5e-8b5a-67d6bb77c3f6", "metadata": { "tags": [] }, "source": [ "# Intro" ] }, { "cell_type": "code", "execution_count": 1, "id": "ca485005-54c1-4126-8c1e-53ca633b7f26", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Python version is: 3.10.11\n", "Torch version is: 1.13.1+cu117\n", "Nvidia device is: NVIDIA GeForce RTX 4090\n", "Transformers version is: 4.32.1\n", "Adapterhub not found!!!\n" ] } ], "source": [ "from transformers import GPT2TokenizerFast, GPT2Model, DataCollatorWithPadding\n", "from transformers.modeling_outputs import SequenceClassifierOutputWithPast\n", "import torch\n", "import torch.nn as nn\n", "from utils import print_system_info\n", "from typing import Literal, Optional, List, Dict, Callable\n", "from types import SimpleNamespace\n", "from dataclasses import dataclass\n", "\n", "print_system_info()" ] }, { "cell_type": "code", "execution_count": 2, "id": "931ebd25-5e5a-4fdf-b2db-92d4ccf7f88e", "metadata": { "tags": [] }, "outputs": [], "source": [ "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "MODEL_NAME = 'gpt2'\n", "NAMESPACE = 'sadcl'\n", "\n", "INIT_TEXT = \"sentiment or value or relation of the previous text is\"\n", "N_LAST_LAYERS = 10\n", "\n", "N_TOKENS = 5" ] }, { "cell_type": "markdown", "id": "e879d47e-0b67-452a-91c2-f36383efbed8", "metadata": {}, "source": [ "# Class" ] }, { "cell_type": "code", "execution_count": 3, "id": "7c61fcde-f9e7-4d30-b989-511010e6298b", "metadata": { "tags": [] }, "outputs": [], "source": [ "def initialize_embedding(\n", " emb_dim: int,\n", " n_tokens: int, \n", " random_range: float,\n", " initialize_from: Optional[torch.Tensor]\n", "):\n", " if initialize_from is None:\n", " return torch.FloatTensor(n_tokens, emb_dim).uniform_(-random_range, random_range)\n", "\n", " assert initialize_from.shape[0] >= n_tokens\n", " assert initialize_from.shape[1] == emb_dim\n", " return initialize_from[:n_tokens, :].detach().clone()\n", "\n", "class SoftEmbedding(nn.Module):\n", " def __init__(\n", " self,\n", " emb_dim: int,\n", " n_tokens: int,\n", " first_layer_flag: bool = False,\n", " random_range: float = 0.1,\n", " initialize_from: Optional[torch.Tensor] = None\n", " ):\n", " super().__init__()\n", " \n", " self.emb_dim = emb_dim\n", " self.n_tokens = n_tokens\n", " self.first_layer_flag = first_layer_flag\n", " \n", " self.sadcl_learned_embedding = nn.parameter.Parameter(\n", " initialize_embedding(\n", " emb_dim,\n", " n_tokens,\n", " random_range,\n", " initialize_from\n", " )\n", " )\n", " # self.sadcl_mlp = nn.Sequential(\n", " # nn.Linear(emb_dim, 24, bias=False),\n", " # nn.ReLU(),\n", " # nn.Linear(24, 768, bias=False)\n", " # )\n", "\n", " assert self.sadcl_learned_embedding.shape == (n_tokens, emb_dim)\n", " \n", " def forward(self, input_embedding, attention_mask, sequnce_lengths):\n", " # input_embedding.shape = (batch_size, num_of_input_tokens+n_tokens, emb_dim)\n", " # output_embedding = []\n", " \n", " learned_embedding = self.sadcl_learned_embedding# + self.sadcl_mlp(self.sadcl_learned_embedding)\n", " \n", " batch_size = input_embedding.size(0)\n", " learned_embedding = learned_embedding.repeat(batch_size, 1, 1) # (batch_size, n_tokens, emb_dim)\n", " \n", " attention_mask_shift = torch.zeros((batch_size, 1, 1, self.n_tokens), device=attention_mask.device)\n", " attention_mask = torch.cat([attention_mask_shift, attention_mask[:, :, :, :-self.n_tokens]], dim=-1)\n", " if self.first_layer_flag:\n", " output_embedding = torch.cat([learned_embedding, input_embedding[:, :-self.n_tokens]], dim=1)\n", " else:\n", " output_embedding = torch.cat([learned_embedding, input_embedding[:, self.n_tokens:]], dim=1)\n", " # print(attention_mask == 0)\n", " return output_embedding, attention_mask\n", " \n", " def get_weights(self):\n", " return self.sadcl_learned_embedding.detach().clone()\n", "\n", "\n", "class GPT2ModuleWrapper(nn.Module):\n", " def __init__(\n", " self,\n", " module,\n", " emb_dim:int,\n", " n_tokens:int,\n", " get_sequnce_lengths:int,\n", " first_layer_flag:bool,\n", " initialize_from:Optional[torch.Tensor] = None\n", " ):\n", " super().__init__()\n", " self.original_module = module\n", " self.soft_prompt = SoftEmbedding(\n", " emb_dim=emb_dim,\n", " n_tokens=n_tokens,\n", " first_layer_flag=first_layer_flag,\n", " initialize_from=initialize_from\n", " )\n", " self.get_sequnce_lengths = get_sequnce_lengths\n", " \n", " \n", " def forward(self, hidden_states, *args, **kwargs):\n", " output_embedding, attention_mask = self.soft_prompt(\n", " hidden_states,\n", " kwargs['attention_mask'],\n", " self.get_sequnce_lengths()\n", " )\n", " kwargs['attention_mask'] = attention_mask\n", " return self.original_module(output_embedding, *args, **kwargs)\n", "\n", "class GPT2Injector:\n", " def __init__(self):\n", " self.sequnce_lengths = None\n", " \n", " def get_sequnce_lengths(self):\n", " return self.sequnce_lengths\n", " \n", " def _mutate_model_forward(self, model):\n", " old_forward = model.forward\n", " pad_token_id = model.config.pad_token_id\n", " def new_forward(*args, **kwargs):\n", " input_ids = kwargs['input_ids']\n", " self.sequnce_lengths = (\n", " torch.eq(input_ids, pad_token_id).long().argmax(-1) - 1\n", " ).detach().cpu().tolist()\n", " return old_forward(*args, **kwargs)\n", " model.forward = new_forward\n", " \n", " def _reverse_mutate_model_forward(self, model):\n", " orig_class = type(model)\n", " model.forward = orig_class.forward.__get__(model, orig_class)\n", " \n", " def mutate(self, model, n_layers, n_tokens, init_prompts):\n", " self._mutate_model_forward(model)\n", " module_list = manager.model.h\n", " start = len(module_list) - n_layers\n", " for idx in range(start, len(module_list)):\n", " module_list[idx] = GPT2ModuleWrapper(\n", " module=module_list[idx],\n", " emb_dim=model.embed_dim,\n", " n_tokens=n_tokens,\n", " get_sequnce_lengths=self.get_sequnce_lengths,\n", " first_layer_flag=(idx == start),\n", " initialize_from=init_prompts[idx][0]\n", " )\n", " return module_list[start:]\n", " \n", " def reverse_mutate(self, model):\n", " self._reverse_mutate_model_forward(model)\n", " module_list = model.h\n", " for idx in range(len(module_list)):\n", " if type(module_list[idx]) is GPT2ModuleWrapper:\n", " module_list[idx] = module_list[idx].original_module\n" ] }, { "cell_type": "code", "execution_count": 4, "id": "f215af71-8f06-4466-a1cc-bf27b1193627", "metadata": { "tags": [] }, "outputs": [], "source": [ "class MixHeadModel(nn.Module):\n", " def __init__(self, model, head):\n", " super().__init__()\n", " self.model = model\n", " self.sadcl_head = head\n", " \n", " def forward(self, *args, **kwargs):\n", " labels = kwargs.pop('labels', None)\n", " transformer_outputs = self.model(*args, **kwargs)\n", " out = self.sadcl_head(\n", " transformer_outputs=transformer_outputs,\n", " labels=labels\n", " )\n", " return out" ] }, { "cell_type": "code", "execution_count": 5, "id": "cea800ea-d538-4aab-8aca-41feaba49b7d", "metadata": { "tags": [] }, "outputs": [], "source": [ "class GPT2ClassificationHead(nn.Module):\n", " def __init__(\n", " self,\n", " emb_dim: int,\n", " n_labels: int,\n", " get_sequnce_lengths: Callable[[], List[int]],\n", " n_tokens: int,\n", " init_range: float,\n", " bias=True\n", " ):\n", " super().__init__()\n", " \n", " self.get_sequnce_lengths = get_sequnce_lengths\n", " self.n_labels = n_labels\n", " self.n_tokens = n_tokens\n", " self.loss_func = nn.CrossEntropyLoss()\n", " \n", " self.score = nn.Linear(emb_dim, n_labels, bias) # Bias is false in huggingface implementation\n", " \n", " self._init_weights(init_range)\n", " \n", " def _init_weights(self, init_range):\n", " self.score.weight.data.normal_(mean=0.0, std=init_range)\n", " if self.score.bias is not None:\n", " self.score.bias.data.zero_()\n", " \n", " def forward(self, transformer_outputs, labels=None):\n", " last_text_token_per_batch = self.get_sequnce_lengths()\n", " last_prompt_token_per_batch = [\n", " seqlen + self.n_tokens for seqlen in last_text_token_per_batch\n", " ]\n", " last_hidden_state = transformer_outputs.last_hidden_state\n", " batch_size = last_hidden_state.size(0)\n", " \n", " # last_text_token = last_hidden_state[range(batch_size), last_text_token_per_batch]\n", " last_prompt_token = last_hidden_state[range(batch_size), last_prompt_token_per_batch]\n", " logits = self.score(last_prompt_token)\n", " \n", " loss = None\n", " if labels is not None:\n", " loss = self.loss_func(logits.view(-1, self.n_labels), labels.view(-1))\n", " \n", " return SequenceClassifierOutputWithPast(\n", " loss=loss,\n", " logits=logits,\n", " past_key_values=transformer_outputs.past_key_values,\n", " hidden_states=transformer_outputs.hidden_states,\n", " attentions=transformer_outputs.attentions,\n", " )" ] }, { "cell_type": "code", "execution_count": 6, "id": "577784eb-ab61-424d-a633-7b030d6d06d3", "metadata": { "tags": [] }, "outputs": [], "source": [ "@dataclass\n", "class PEFTConfig:\n", " name: str\n", " kind: Literal['regression', 'classification', 'generation']\n", " n_labels: Optional[int] # only for classification\n", " @classmethod\n", " def classification(cls, name: str, n_labels: int):\n", " return cls(name=name, n_labels=n_labels, kind='classification')\n", "\n", "class GPT2LLL:\n", " def __init__(\n", " self,\n", " n_tokens=N_TOKENS,\n", " n_last_layers=N_LAST_LAYERS,\n", " model_name=MODEL_NAME,\n", " device=DEVICE,\n", " init_text=INIT_TEXT\n", " ):\n", " self.n_tokens = n_tokens\n", " self.n_last_layers = n_last_layers\n", " self.model_name = model_name\n", " self.device = device\n", " \n", " self.pefts = {}\n", " \n", " self.tokenizer = GPT2TokenizerFast.from_pretrained(model_name, add_prefix_space=True)\n", " self.tokenizer.pad_token = self.tokenizer.eos_token\n", " \n", " self.model = GPT2Model.from_pretrained(model_name, pad_token_id=self.tokenizer.pad_token_id)\n", " self.model.to(device);\n", " \n", " init_tokens = self.tokenizer(init_text, return_tensors='pt').to(device)\n", " with torch.no_grad():\n", " self.init_prompts = self.model(**init_tokens, output_hidden_states=True).hidden_states\n", " \n", " self.current_peft_name = None\n", " self.current_mix_model = None\n", " \n", " @property\n", " def current_peft(self):\n", " if self.current_peft_name is None:\n", " return None\n", " return self.pefts[self.current_peft_name]\n", " \n", " def generate_tokenizer_map(self):\n", " n_tokens = self.n_tokens\n", " tokenizer = self.tokenizer\n", " def return_function(rows):\n", " outputs_dict = tokenizer(rows)\n", " for row in outputs_dict['input_ids']:\n", " row.extend([tokenizer.pad_token_id] * n_tokens)\n", " for row in outputs_dict['attention_mask']:\n", " row.extend([0] * n_tokens)\n", " return outputs_dict\n", " return return_function\n", " \n", " def activate_peft(self, name):\n", " self.current_peft_name = name\n", " \n", " self.current_peft.injector.mutate(\n", " model=self.model,\n", " n_layers=self.n_last_layers,\n", " n_tokens=self.n_tokens,\n", " init_prompts=self.init_prompts\n", " )\n", " self.current_mix_model = MixHeadModel(\n", " head=self.current_peft.head,\n", " model=self.model\n", " )\n", " \n", " def auto_freeze(self):\n", " print(\"Unfreezed params are:\")\n", " for param_name, weights in self.current_mix_model.named_parameters():\n", " if NAMESPACE in param_name:\n", " weights.requires_grad = True\n", " print(\"- \" + param_name)\n", " else:\n", " weights.requires_grad = False\n", " \n", " def add_peft(self, config: PEFTConfig):\n", " assert config.name not in self.pefts\n", " injector = GPT2Injector()\n", " head = GPT2ClassificationHead(\n", " emb_dim=self.model.embed_dim,\n", " n_labels=config.n_labels,\n", " get_sequnce_lengths=injector.get_sequnce_lengths,\n", " n_tokens=self.n_tokens,\n", " init_range=self.model.config.initializer_range,\n", " bias=False\n", " )\n", " head.to(self.device)\n", " self.pefts[config.name] = SimpleNamespace(\n", " head=head,\n", " injector=injector\n", " )" ] }, { "cell_type": "markdown", "id": "8fcfcb04-6513-4321-917f-d13c2dba886e", "metadata": { "tags": [] }, "source": [ "# Train" ] }, { "cell_type": "markdown", "id": "003fb992-fb75-4655-b60a-284ef0dcf4eb", "metadata": { "tags": [] }, "source": [ "## Prepare" ] }, { "cell_type": "code", "execution_count": 7, "id": "cfe42619-bb12-430e-9359-5ee2d2e40bdc", "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Unfreezed params are:\n", "- model.h.2.soft_prompt.sadcl_learned_embedding\n", "- model.h.3.soft_prompt.sadcl_learned_embedding\n", "- model.h.4.soft_prompt.sadcl_learned_embedding\n", "- model.h.5.soft_prompt.sadcl_learned_embedding\n", "- model.h.6.soft_prompt.sadcl_learned_embedding\n", "- model.h.7.soft_prompt.sadcl_learned_embedding\n", "- model.h.8.soft_prompt.sadcl_learned_embedding\n", "- model.h.9.soft_prompt.sadcl_learned_embedding\n", "- model.h.10.soft_prompt.sadcl_learned_embedding\n", "- model.h.11.soft_prompt.sadcl_learned_embedding\n", "- sadcl_head.score.weight\n" ] } ], "source": [ "peft_name = 'peft1'\n", "\n", "manager = GPT2LLL()\n", "manager.add_peft(PEFTConfig.classification(name=peft_name, n_labels=2))\n", "manager.activate_peft(peft_name)\n", "manager.auto_freeze()" ] }, { "cell_type": "code", "execution_count": 8, "id": "072bf63b-de2f-4c05-a6a2-6fde3bb5aa6d", "metadata": { "tags": [] }, "outputs": [], "source": [ "from config import load_config\n", "config = load_config('config.yaml')" ] }, { "cell_type": "code", "execution_count": 9, "id": "3b3827aa-a61c-4e34-9e67-86768bd8b446", "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset glue (/home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "ac4726d36f6241c59be6dbeee759fce2", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-f7a02c6d65621ecd.arrow\n", "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-c36341ab82d2d37d.arrow\n", "Loading cached processed dataset at /home/mohalisad/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-9f7663dac81ea13b.arrow\n" ] } ], "source": [ "from datasets import load_dataset\n", "dataset = load_dataset('glue', 'cola')\n", "tokenizer_map = manager.generate_tokenizer_map()\n", "dataset = dataset.map(lambda x: tokenizer_map(x['sentence']), batched=True)\n", "dataset.set_format(type='torch', columns=[\n", " 'input_ids', 'attention_mask', 'label' # 'token_type_ids',\n", "])" ] }, { "cell_type": "markdown", "id": "5331fedd-e6ec-4387-a1ec-55488d144f45", "metadata": {}, "source": [ "## Training" ] }, { "cell_type": "code", "execution_count": 15, "id": "e13c9012-089f-45c1-baea-4f0850ccfbaa", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/html": [ "\n", "
Epoch | \n", "Training Loss | \n", "Validation Loss | \n", "Accuracy | \n", "F1-score-1 | \n", "F1-score-ma | \n", "
---|---|---|---|---|---|
1 | \n", "No log | \n", "0.617917 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
2 | \n", "0.618200 | \n", "0.620259 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
3 | \n", "0.618200 | \n", "0.612236 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
4 | \n", "0.616500 | \n", "0.613789 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
5 | \n", "0.616500 | \n", "0.615989 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
6 | \n", "0.612800 | \n", "0.614961 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
7 | \n", "0.612800 | \n", "0.612622 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
8 | \n", "0.611300 | \n", "0.613691 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
9 | \n", "0.611300 | \n", "0.613889 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
10 | \n", "0.609400 | \n", "0.616157 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
11 | \n", "0.609400 | \n", "0.614404 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
12 | \n", "0.609700 | \n", "0.614005 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
13 | \n", "0.609700 | \n", "0.611722 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
14 | \n", "0.607100 | \n", "0.609891 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
15 | \n", "0.606600 | \n", "0.612338 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
16 | \n", "0.606600 | \n", "0.614802 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
17 | \n", "0.604600 | \n", "0.614289 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
18 | \n", "0.604600 | \n", "0.610662 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
19 | \n", "0.603600 | \n", "0.610867 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
20 | \n", "0.603600 | \n", "0.615460 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
21 | \n", "0.602600 | \n", "0.612030 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
22 | \n", "0.602600 | \n", "0.611254 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
23 | \n", "0.601900 | \n", "0.612736 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
24 | \n", "0.601900 | \n", "0.613839 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
25 | \n", "0.604800 | \n", "0.612303 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
26 | \n", "0.604800 | \n", "0.612139 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
27 | \n", "0.603400 | \n", "0.612106 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
28 | \n", "0.602300 | \n", "0.614560 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
29 | \n", "0.602300 | \n", "0.613581 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
30 | \n", "0.602800 | \n", "0.615965 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
31 | \n", "0.602800 | \n", "0.613715 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
32 | \n", "0.601400 | \n", "0.613545 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
33 | \n", "0.601400 | \n", "0.612631 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
34 | \n", "0.601400 | \n", "0.611881 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
35 | \n", "0.601400 | \n", "0.614503 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
36 | \n", "0.600700 | \n", "0.610912 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
37 | \n", "0.600700 | \n", "0.611916 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
38 | \n", "0.600800 | \n", "0.611409 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
39 | \n", "0.600800 | \n", "0.613652 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
40 | \n", "0.600600 | \n", "0.612413 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
41 | \n", "0.600600 | \n", "0.613673 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
42 | \n", "0.600400 | \n", "0.611154 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
43 | \n", "0.600000 | \n", "0.611216 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
44 | \n", "0.600000 | \n", "0.610118 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
45 | \n", "0.601900 | \n", "0.611573 | \n", "0.692234 | \n", "0.817717 | \n", "0.415012 | \n", "
46 | \n", "0.601900 | \n", "0.613571 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
47 | \n", "0.598700 | \n", "0.611853 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
48 | \n", "0.598700 | \n", "0.611213 | \n", "0.691275 | \n", "0.817253 | \n", "0.411713 | \n", "
49 | \n", "0.597600 | \n", "0.611855 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
50 | \n", "0.597600 | \n", "0.611871 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
51 | \n", "0.600100 | \n", "0.612086 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
52 | \n", "0.600100 | \n", "0.610666 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
53 | \n", "0.599600 | \n", "0.613406 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
54 | \n", "0.599600 | \n", "0.617041 | \n", "0.692234 | \n", "0.817924 | \n", "0.412058 | \n", "
"
],
"text/plain": [
"