-import torch
-
-from data.config import Config
-from data.twitter.data_loader import TwitterDatasetLoader
-
-
-class TwitterConfig(Config):
-    name = 'twitter'
-    DatasetLoader = TwitterDatasetLoader
-
-    data_path = '../../../../../media/external_3TB/3TB/ghorbanpoor/twitter/'
-    # data_path = '/home/faeze/PycharmProjects/fake_news_detection/data/twitter/'
-    output_path = '../../../../../media/external_10TB/10TB/ghorbanpoor/'
-    # output_path = ''
-
-    train_image_path = data_path + 'images_train/'
-    validation_image_path = data_path + 'images_test/'
-    test_image_path = data_path + 'images_test/'
-
-    train_text_path = data_path + 'twitter_train_translated.csv'
-    validation_text_path = data_path + 'twitter_test_translated.csv'
-    test_text_path = data_path + 'twitter_test_translated.csv'
-
-    batch_size = 128
-    epochs = 100
-    num_workers = 2
-    head_lr = 0.0254
-    image_encoder_lr = 0.0005
-    text_encoder_lr = 2.0e-05
-    attention_lr = 1e-3
-    classification_lr = 0.0034
-
-    dropout = 0.5
-    hidden_size = 128
-    projection_size = 64
-
-    head_weight_decay = 0.05
-    attention_weight_decay = 0.05
-    classification_weight_decay = 0.05
-
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-    image_model_name = '../../../../../media/external_10TB/10TB/ghorbanpoor/huggingface/vit-base-patch16-224'
-    image_embedding = 768
-    text_encoder_model = "../../../../../media/external_10TB/10TB/ghorbanpoor/huggingface/roberta-base"
-    # text_encoder_model = "/home/faeze/PycharmProjects/new_fake_news_detectioin/bert/bert-base-uncased"
-    text_tokenizer = "../../../../../media/external_10TB/10TB/ghorbanpoor/huggingface/roberta-base"
-    # text_tokenizer = "/home/faeze/PycharmProjects/new_fake_news_detectioin/bert/bert-base-uncased"
-    text_embedding = 768
-    max_length = 32
-
-    pretrained = True
-    trainable = False
-    temperature = 1.0
-
-    classes = ['real', 'fake']
-    class_weights = [1, 1]
-
-    wanted_accuracy = 0.76
-
-    def optuna(self, trial):
-        self.head_lr = trial.suggest_loguniform('head_lr', 1e-5, 1e-1)
-        self.image_encoder_lr = trial.suggest_loguniform('image_encoder_lr', 1e-6, 1e-3)
-        self.text_encoder_lr = trial.suggest_loguniform('text_encoder_lr', 1e-6, 1e-3)
-        self.classification_lr = trial.suggest_loguniform('classification_lr', 1e-5, 1e-1)
-
-        self.head_weight_decay = trial.suggest_loguniform('head_weight_decay', 1e-5, 1e-1)
-        self.attention_weight_decay = trial.suggest_loguniform('attention_weight_decay', 1e-5, 1e-1)
-        # self.classification_weight_decay = trial.suggest_loguniform('classification_weight_decay', 1e-5, 1e-1)
-
-        self.projection_size = trial.suggest_categorical('projection_size', [256, 128, 64])
-        # self.hidden_size = trial.suggest_categorical('hidden_size', [256, 128, 64, ])
-        self.dropout = trial.suggest_categorical('drop_out', [0.1, 0.3, 0.5, ])
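
For reference, a minimal sketch of how a config like the one removed above is typically driven by Optuna: the optuna(self, trial) hook overwrites the class-level hyperparameters with trial suggestions, so an objective function can instantiate the config, apply the trial, and train with it. This is an illustration under assumptions, not code from the repository: train_and_eval is a hypothetical stand-in for the project's training loop, and only the standard Optuna API (create_study, optimize) plus the TwitterConfig class above are assumed. Note that suggest_loguniform is deprecated in Optuna 3.x in favour of suggest_float(..., log=True).

import optuna


def train_and_eval(config):
    # Hypothetical stand-in for the project's training loop: build the model from
    # `config`, train it, and return the validation metric Optuna should maximize.
    return 0.0  # replace with e.g. validation accuracy


def objective(trial):
    config = TwitterConfig()   # the class defined in the removed file above
    config.optuna(trial)       # overwrite learning rates, weight decays, projection size, dropout
    return train_and_eval(config)


study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=50)
print(study.best_params)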