import sys
import os
import argparse
from abc import ABC, abstractmethod
from typing import Tuple

from ..models.model import Model
from ..configs.base_config import BaseConfig, PhaseType


class BaseEntrypoint(ABC):
    """Base class for all entrypoints.

    An entrypoint is invoked as `<command> <entrypoint-name> [options]`;
    the parsed options are used to update the configuration returned by
    `_get_conf_model`.
    """

    description = ''

    def __init__(self, phase_type: PhaseType) -> None:
        super().__init__()
        self.phase_type = phase_type

        # Build the configuration and the model, then update the configuration
        # with the parsed command-line options (sys.argv[0] is the command,
        # sys.argv[1] the entrypoint name, and the rest are the options).
        self.conf, self.model = self._get_conf_model()
        self.parser = self._create_parser(sys.argv[0], sys.argv[1])
        self.conf.update(self.parser.parse_args(sys.argv[2:]))

    def _create_parser(self, command: str, entrypoint_name: str) -> argparse.ArgumentParser:
        parser = argparse.ArgumentParser(
            prog=f'{os.path.basename(command)} {entrypoint_name}',
            description=self.description or None
        )
        parser.add_argument('--device', default='cpu', type=str,
                            help='The device to run the analysis on: cpu, cuda:N for a specific GPU, or cuda for all GPUs.')

        parser.add_argument('--samples-dir', default=None, type=str,
                            help='The directory or data group to evaluate. A data group can be train, test, or val; a directory must contain 0 and 1 subdirectories. Append :FilterName to restrict the run to samples whose path contains /FilterName/.')

        parser.add_argument('--final-model-dir', default=None, type=str,
                            help='The directory to load the model from; if not given, it is derived automatically.')

        parser.add_argument('--save-dir', default=None, type=str,
                            help='The directory to save the model to; if not given, it is derived automatically.')

        parser.add_argument('--report-dir', default=None, type=str,
                            help='The directory to save per-slice, per-sample reports in.')

        parser.add_argument('--epoch', default=None, type=str,
                            help='The epoch to load.')

        parser.add_argument('--try-name', default=None, type=str,
                            help='The run name describing what this run does.')

        parser.add_argument('--try-num', default=None, type=int,
                            help='The try number to load.')

        parser.add_argument('--data-separation', default=None, type=str,
                            help='The data separation to be used.')

        parser.add_argument('--batch-size', default=None, type=int,
                            help='The batch size to be used.')

        parser.add_argument('--pretrained-model-file', default=None, type=str,
                            help='The path of the .pt pretrained model.')

        parser.add_argument('--max-epochs', default=None, type=int,
                            help='The maximum number of training epochs.')

        parser.add_argument('--big-batch-size', default=None, type=int,
                            help='The big batch size (the number of iterations per optimization step).')

        parser.add_argument('--iters-per-epoch', default=None, type=int,
                            help='The number of big batches per epoch.')

        parser.add_argument('--interpretation-method', default=None, type=str,
                            help='The method used for interpreting the results.')

        parser.add_argument('--cut-threshold', default=None, type=float,
                            help='The threshold for cutting interpretations.')

        parser.add_argument('--global-threshold', action='store_true',
                            help='Whether the given cut threshold should be applied to the global values rather than to the relative ones.')

        parser.add_argument('--dynamic-threshold', action='store_true',
                            help='Whether to use a dynamic threshold in interpretation.')

        parser.add_argument('--class-label-for-interpretation', default=None, type=int,
                            help='The class label whose choice should be explained. If not given, the decision of the model is explained.')

        parser.add_argument('--interpret-predictions-vs-gt', default='1', type=(lambda x: x == '1'),
                            help='Considered only when --class-label-for-interpretation is not given. If 1, interpretations are computed for the predicted label; otherwise, for the ground truth.')

        parser.add_argument('--mapped-labels-to-use', default=None, type=(lambda x: [int(y) for y in x.split(',')]),
                            help='A comma-separated list of labels to restrict the analysis to; by default all labels are used.')

        parser.add_argument('--skip-overlay', action='store_true', default=None,
                            help='Prevents the interpretation phase from storing overlay images.')

        parser.add_argument('--skip-raw', action='store_true', default=None,
                            help='Prevents the interpretation phase from storing the raw interpretation values.')

        parser.add_argument('--overlay-only', action='store_true',
                            help='Makes the interpretation phase store only overlay images '
                                 'and not the `.npy` files.')

        parser.add_argument('--save-by-file-name', action='store_true',
                            help='Saves sample-specific files by their file names only, not by their whole path.')

        parser.add_argument('--n-interpretation-samples', default=None, type=int,
                            help='The maximum number of samples to be interpreted.')

        parser.add_argument('--interpretation-tag-to-evaluate', default=None, type=str,
                            help='The tag to be used as the interpretation in the evaluation phase; otherwise, the first tag is used.')
        return parser

    @abstractmethod
    def _get_conf_model(self) -> Tuple[BaseConfig, Model]:
        """Create and return the configuration and the model this entrypoint works with."""
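

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how a concrete subclass might
# implement `_get_conf_model`. `TrainEntrypoint`, `TrainingConfig`, `SimpleModel`,
# and the `main.py` dispatcher name are hypothetical, assumed only for this
# example; a real subclass would return its own config and model.
#
#     class TrainEntrypoint(BaseEntrypoint):
#         description = 'Trains the model over the train data group.'
#
#         def _get_conf_model(self) -> Tuple[BaseConfig, Model]:
#             conf = TrainingConfig(self.phase_type)
#             return conf, SimpleModel(conf)
#
# With sys.argv[0] as the command and sys.argv[1] as the entrypoint name, a
# typical invocation would look like:
#
#     python main.py train --device cuda:0 --batch-size 32 --max-epochs 50
# ---------------------------------------------------------------------------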