Source code for gluoncv.data.pascal_voc.segmentation

"""Pascal VOC Semantic Segmentation Dataset."""
import os
import numpy as np
from PIL import Image
from ..segbase import SegmentationDataset

class VOCSegmentation(SegmentationDataset):
    """Pascal VOC Semantic Segmentation Dataset.

    Parameters
    ----------
    root : string
        Path to VOCdevkit folder. Default is '$(HOME)/.mxnet/datasets/voc'
    split : string
        'train', 'val' or 'test'
    transform : callable, optional
        A function that transforms the image

    Examples
    --------
    >>> from mxnet.gluon.data.vision import transforms
    >>> # Transforms for Normalization
    >>> input_transform = transforms.Compose([
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    >>> ])
    >>> # Create Dataset
    >>> trainset = gluoncv.data.VOCSegmentation(split='train', transform=input_transform)
    >>> # Create Training Loader
    >>> train_data = gluon.data.DataLoader(
    >>>     trainset, 4, shuffle=True, last_batch='rollover',
    >>>     num_workers=4)
    """
    BASE_DIR = 'VOC2012'

    def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/voc'),
                 split='train', transform=None):
        super(VOCSegmentation, self).__init__(root)
        self.root = root
        _voc_root = os.path.join(self.root, self.BASE_DIR)
        _mask_dir = os.path.join(_voc_root, 'SegmentationClass')
        _image_dir = os.path.join(_voc_root, 'JPEGImages')
        self.transform = transform
        self.train = split
        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(_voc_root, 'ImageSets/Segmentation')
        if self.train == 'train':
            _split_f = os.path.join(_splits_dir, 'trainval.txt')
        elif self.train == 'val':
            _split_f = os.path.join(_splits_dir, 'val.txt')
        elif self.train == 'test':
            _split_f = os.path.join(_splits_dir, 'test.txt')
        else:
            raise RuntimeError('Unknown dataset split.')

        self.images = []
        self.masks = []
        with open(_split_f, "r") as lines:
            for line in lines:
                _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg")
                assert os.path.isfile(_image)
                self.images.append(_image)
                if self.train != 'test':
                    _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".png")
                    assert os.path.isfile(_mask)
                    self.masks.append(_mask)

        if self.train != 'test':
            assert len(self.images) == len(self.masks)

    def __getitem__(self, index):
        img = Image.open(self.images[index]).convert('RGB')
        if self.train == 'test':
            img = self._img_transform(img)
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        timg = Image.open(self.masks[index])
        # Use a signed dtype so the 'ambiguous' label (255) can be remapped to -1;
        # with uint8 the assignment below would silently wrap back to 255.
        target = np.array(timg, dtype=np.int32)
        target[target == 255] = -1
        target = Image.fromarray(target)
        # synchronized transform
        if self.train == 'train':
            img, target = self._sync_transform(img, target)
        elif self.train == 'val':
            img, target = self._val_sync_transform(img, target)
        else:
            raise RuntimeError('unknown mode for dataloader: {}'.format(self.train))
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.images)

    @property
    def classes(self):
        """Category names."""
        return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
                'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                'horse', 'motorcycle', 'person', 'potted-plant', 'sheep',
                'sofa', 'train', 'tv', 'ambiguous')
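
A minimal usage sketch, following the docstring example above. It assumes mxnet and gluoncv are installed and that the VOC2012 data has already been extracted under the default root ('~/.mxnet/datasets/voc/VOC2012'); the transform, batch size, and worker count are illustrative only:

from mxnet import gluon
from mxnet.gluon.data.vision import transforms
import gluoncv

# ImageNet-style normalization; any callable mapping an image to a tensor works here.
input_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
])

# Assumes VOC2012 is extracted under the default root (~/.mxnet/datasets/voc).
trainset = gluoncv.data.VOCSegmentation(split='train', transform=input_transform)
print('samples:', len(trainset), 'classes:', len(trainset.classes))

# A single sample: image tensor plus a mask with class indices and -1 for ambiguous pixels.
img, mask = trainset[0]
print(img.shape, mask.shape)

# Batched loading, mirroring the docstring example.
train_data = gluon.data.DataLoader(
    trainset, batch_size=4, shuffle=True, last_batch='rollover', num_workers=4)
for batch_img, batch_mask in train_data:
    print(batch_img.shape, batch_mask.shape)
    break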