Browse Source

Add files via upload

main
rucv 1 year ago
parent
commit
b371fba7de
No account linked to committer's email address

+ 66
- 0
datasets/EndoScene.py View File

@@ -0,0 +1,66 @@
#Adopted from ACSNet

import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms.functional as F
from torchvision import transforms
import os
from PIL import Image
import os.path as osp
from utils.transform import *


# EndoScene Dataset
class EndoScene(Dataset):
    """Paired image/ground-truth dataset for the EndoScene polyp benchmark.

    Expects ``<root>/<data_dir>/image`` and ``<root>/<data_dir>/gtpolyp`` to
    hold files with matching names (masks share the image file names).
    ``__getitem__`` returns a dict ``{'image': ..., 'label': ...}`` after the
    paired transform has been applied.
    """

    def __init__(self, root, data_dir, mode='train', transform=None):
        """Index the image/mask files and build a default transform pipeline.

        Args:
            root: dataset root directory.
            data_dir: sub-directory of ``root`` containing ``image``/``gtpolyp``.
            mode: ``'train'`` builds an augmentation pipeline;
                ``'valid'``/``'test'`` only resize and convert to tensors.
            transform: optional paired transform (applied to the whole sample
                dict); when ``None`` a default pipeline for ``mode`` is used.
        """
        super(EndoScene, self).__init__()
        data_path = osp.join(root, data_dir)
        image_dir = osp.join(data_path, 'image')
        gt_dir = osp.join(data_path, 'gtpolyp')

        # Masks are assumed to share file names with their images, so one
        # directory listing drives both lists (keeps them index-aligned).
        names = os.listdir(image_dir)
        self.imglist = [osp.join(image_dir, name) for name in names]
        self.gtlist = [osp.join(gt_dir, name) for name in names]

        if transform is None:
            if mode == 'train':
                # Augmentations come from utils.transform and operate on the
                # sample dict (image and mask transformed consistently).
                transform = transforms.Compose([
                    Resize((320, 320)),
                    RandomHorizontalFlip(),
                    RandomVerticalFlip(),
                    RandomRotation(90),
                    RandomZoom((0.9, 1.1)),
                    RandomCrop((256, 256)),
                    ToTensor(),
                ])
            elif mode == 'valid' or mode == 'test':
                transform = transforms.Compose([
                    Resize((320, 320)),
                    ToTensor(),
                ])
        self.transform = transform

    def __getitem__(self, index):
        """Load one (image, mask) pair and apply the paired transform."""
        img = Image.open(self.imglist[index]).convert('RGB')
        # Masks are loaded single-channel ('L').
        gt = Image.open(self.gtlist[index]).convert('L')
        data = {'image': img, 'label': gt}
        if self.transform:
            data = self.transform(data)
        return data

    def __len__(self):
        return len(self.imglist)

+ 2
- 0
datasets/__init__.py View File

@@ -0,0 +1,2 @@
from .EndoScene import EndoScene
from .kvasir_SEG import kvasir_SEG

BIN
datasets/__pycache__/EndoScene.cpython-35.pyc View File


BIN
datasets/__pycache__/EndoScene.cpython-36.pyc View File


BIN
datasets/__pycache__/__init__.cpython-35.pyc View File


BIN
datasets/__pycache__/__init__.cpython-36.pyc View File


BIN
datasets/__pycache__/kvasir_SEG.cpython-36.pyc View File


+ 91
- 0
datasets/kvasir_SEG.py View File

@@ -0,0 +1,91 @@
#Adopted from ACSNet
import os
import os.path as osp
from utils.transform import *
from torch.utils.data import Dataset
from torchvision import transforms


# KavSir-SEG Dataset
class kvasir_SEG(Dataset):
    """Paired image/mask dataset for Kvasir-SEG.

    Expects ``<root>/<data2_dir>/images`` and ``<root>/<data2_dir>/masks`` to
    hold files with matching names.  ``__getitem__`` returns a dict
    ``{'image': ..., 'label': ...}`` after the paired transform.

    NOTE(review): ``Image`` is used in ``__getitem__`` but this module has no
    explicit ``from PIL import Image`` — it presumably arrives via the star
    import of ``utils.transform``; consider importing it explicitly.
    """

    def __init__(self, root, data2_dir, mode='train', transform=None):
        """Index the image/mask files and build a default transform pipeline.

        Args:
            root: dataset root directory.
            data2_dir: sub-directory of ``root`` containing ``images``/``masks``.
            mode: ``'train'`` builds an augmentation pipeline;
                ``'valid'``/``'test'`` only resize and convert to tensors.
            transform: optional paired transform; ``None`` selects a default.
        """
        super(kvasir_SEG, self).__init__()
        data_path = osp.join(root, data2_dir)
        image_dir = osp.join(data_path, 'images')
        mask_dir = osp.join(data_path, 'masks')

        # Masks share file names with the images, so a single listing keeps
        # the two lists index-aligned.
        names = os.listdir(image_dir)
        self.imglist = [osp.join(image_dir, name) for name in names]
        self.gtlist = [osp.join(mask_dir, name) for name in names]

        if transform is None:
            if mode == 'train':
                transform = transforms.Compose([
                    Resize((320, 320)),
                    RandomHorizontalFlip(),
                    RandomVerticalFlip(),
                    RandomRotation(90),
                    RandomZoom((0.9, 1.1)),
                    Translation(10),
                    RandomCrop((256, 256)),
                    ToTensor(),
                ])
            elif mode == 'valid' or mode == 'test':
                transform = transforms.Compose([
                    Resize((320, 320)),
                    ToTensor(),
                ])
        self.transform = transform

    def __getitem__(self, index):
        """Load one (image, mask) pair and apply the paired transform."""
        img = Image.open(self.imglist[index]).convert('RGB')
        # Masks are loaded single-channel ('L').
        gt = Image.open(self.gtlist[index]).convert('L')
        data = {'image': img, 'label': gt}
        if self.transform:
            data = self.transform(data)
        return data

    def __len__(self):
        return len(self.imglist)


class test_dataset:
    """Sequential test-time loader (not a torch ``Dataset``).

    Call ``load_data()`` repeatedly; it yields ``(image_tensor, gt_pil, name)``
    and advances an internal cursor.  ``image_root`` and ``gt_root`` are
    concatenated directly with file names, so they must end with ``'/'``
    (kept from the original for compatibility).

    NOTE(review): ``load_data`` does not wrap ``self.index`` — callers must
    not call it more than ``self.size`` times.
    """

    def __init__(self, image_root, gt_root, testsize):
        self.testsize = testsize
        # Keep the original root+name string concatenation: roots are
        # expected to carry a trailing slash.
        self.images = sorted(
            image_root + f for f in os.listdir(image_root)
            if f.endswith(('.jpg', '.png'))
        )
        self.gts = sorted(
            gt_root + f for f in os.listdir(gt_root)
            if f.endswith(('.tif', '.png'))
        )
        # BUG FIX: in the original, the closing '])' of this Compose was
        # inside the commented-out Normalize line, leaving the call
        # unterminated (a SyntaxError). The list is now closed properly.
        self.transform = transforms.Compose([
            transforms.Resize((self.testsize, self.testsize)),
            transforms.ToTensor(),
        ])
        self.gt_transform = transforms.ToTensor()
        self.size = len(self.images)
        self.index = 0

    def load_data(self):
        """Return the next ``(image, gt, name)`` triple and advance the cursor.

        The image is transformed and batched (``unsqueeze(0)``); the ground
        truth is returned as a raw single-channel PIL image; ``.jpg`` names
        are rewritten to ``.png`` so predictions can be saved next to masks.
        """
        image = self.rgb_loader(self.images[self.index])
        image = self.transform(image).unsqueeze(0)
        gt = self.binary_loader(self.gts[self.index])
        name = self.images[self.index].split('/')[-1]
        if name.endswith('.jpg'):
            name = name.split('.jpg')[0] + '.png'
        self.index += 1
        return image, gt, name

    def rgb_loader(self, path):
        """Open ``path`` and return it as a 3-channel RGB PIL image."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')

    def binary_loader(self, path):
        """Open ``path`` and return it as a single-channel ('L') PIL image."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('L')

Loading…
Cancel
Save