diff --git a/data/base_dataset.py b/data/base_dataset.py
index 4a010c52..9948bfc5 100644
--- a/data/base_dataset.py
+++ b/data/base_dataset.py
@@ -16,7 +16,7 @@ def get_transform(opt):
     transform_list = []
     if opt.resize_or_crop == 'resize_and_crop':
         osize = [opt.loadSizeX, opt.loadSizeY]
-        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
+        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
         transform_list.append(transforms.RandomCrop(opt.fineSize))
     elif opt.resize_or_crop == 'crop':
         transform_list.append(transforms.RandomCrop(opt.fineSize))
diff --git a/data/custom_dataset_data_loader.py b/data/custom_dataset_data_loader.py
index 1bf1dc22..1d2441c3 100644
--- a/data/custom_dataset_data_loader.py
+++ b/data/custom_dataset_data_loader.py
@@ -13,6 +13,7 @@ def CreateDataset(opt):
     elif opt.dataset_mode == 'single':
         from data.single_dataset import SingleDataset
         dataset = SingleDataset()
+        dataset.initialize(opt)
     else:
         raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)

diff --git a/models/test_model.py b/models/test_model.py
index a1316766..5c2d83ed 100644
--- a/models/test_model.py
+++ b/models/test_model.py
@@ -1,3 +1,4 @@
+import torch
 from torch.autograd import Variable
 from collections import OrderedDict
 import util.util as util
@@ -33,8 +34,9 @@ def set_input(self, input):
         self.image_paths = input['A_paths']

     def test(self):
-        self.real_A = Variable(self.input_A, volatile=True)
-        self.fake_B = self.netG.forward(self.real_A)
+        with torch.no_grad():
+            self.real_A = Variable(self.input_A)
+            self.fake_B = self.netG.forward(self.real_A)

     # get image paths
     def get_image_paths(self):