From eec5d9983bd489fe38f303889d3ce2ff28532505 Mon Sep 17 00:00:00 2001
From: surgan12
Date: Wed, 5 Dec 2018 17:54:21 +0530
Subject: [PATCH 1/4] mnist added dcgan

---
 dcgan/.swp    | Bin 0 -> 12288 bytes
 dcgan/main.py |  20 +++++++++++++++++---
 2 files changed, 17 insertions(+), 3 deletions(-)
 create mode 100644 dcgan/.swp

diff --git a/dcgan/.swp b/dcgan/.swp
new file mode 100644
index 0000000000000000000000000000000000000000..9fe97df71e9a1f53c44f004c49e7c346ed8207a1
GIT binary patch
literal 12288
zcmeI%zY2mt9Ki9PH8mCW0#>tPwe$!z6fU+ag-GE=S!?u;Jx)budQ0GH2;YPA$Nl4e
z_kkM@BX6@@*>l$uU3&SaD0f%nd^yEoYI>?;G0$1}jvro=L!we=t+HLD)V_KVBM2Z+
zvp^@9O();0J{}G2K|iQ@f3`ya0R#|0009ILKmdV%3S>u1ZaRThnMAvc)=w;(1OyO3
z009ILKmY**5I_I{1Q2MTK)4lobVZ)J=KufA`~R)mFVo+^bCEIv2q1s}0tg_000Iag
LfB*sr7=cVam4X?g

literal 0
HcmV?d00001

diff --git a/dcgan/main.py b/dcgan/main.py
index 88a70c95a5..8751189e33 100644
--- a/dcgan/main.py
+++ b/dcgan/main.py
@@ -14,7 +14,7 @@
 parser = argparse.ArgumentParser()
-parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw | fake')
+parser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake')
 parser.add_argument('--dataroot', required=True, help='path to dataset')
 parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
 parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
@@ -22,7 +22,7 @@
 parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
 parser.add_argument('--ngf', type=int, default=64)
 parser.add_argument('--ndf', type=int, default=64)
-parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
+parser.add_argument('--niter', type=int, default=2, help='number of epochs to train for')
 parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
 parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
 parser.add_argument('--cuda', action='store_true', help='enables cuda')
@@ -60,6 +60,7 @@
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                ]))
+    nc=3
 elif opt.dataset == 'lsun':
     dataset = dset.LSUN(root=opt.dataroot, classes=['bedroom_train'],
                         transform=transforms.Compose([
@@ -68,6 +69,7 @@
                             transforms.ToTensor(),
                             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                         ]))
+    nc=3
 elif opt.dataset == 'cifar10':
     dataset = dset.CIFAR10(root=opt.dataroot, download=True,
                            transform=transforms.Compose([
@@ -75,9 +77,22 @@
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                            ]))
+    nc=3
+
+elif opt.dataset == 'mnist':
+    dataset = dset.MNIST(root=opt.dataroot, download=True,
+                         transform=transforms.Compose([
+                             transforms.Resize(opt.imageSize),
+                             transforms.ToTensor(),
+                             transforms.Normalize((0.5,), (0.5,)),
+                         ]))
+    nc=1
+
 elif opt.dataset == 'fake':
     dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
                             transform=transforms.ToTensor())
+    nc=3
+
 assert dataset
 dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))
@@ -87,7 +102,6 @@
 nz = int(opt.nz)
 ngf = int(opt.ngf)
 ndf = int(opt.ndf)
-nc = 3
 
 
 # custom weights initialization called on netG and netD

From f568a3ad47e3776b5ff149578cc0433e2115fdda Mon Sep 17 00:00:00 2001
From: surgan12
Date: Wed, 5 Dec 2018 18:03:32 +0530
Subject: [PATCH 2/4] mnist added

---
 dcgan/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dcgan/main.py b/dcgan/main.py
index 8751189e33..55fecba2bd 100644
--- a/dcgan/main.py
+++ b/dcgan/main.py
@@ -22,7 +22,7 @@
 parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
 parser.add_argument('--ngf', type=int, default=64)
 parser.add_argument('--ndf', type=int, default=64)
-parser.add_argument('--niter', type=int, default=2, help='number of epochs to train for')
+parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
 parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
 parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
 parser.add_argument('--cuda', action='store_true', help='enables cuda')

From 11ef15167896ac48a483e6cac20b7bb6a772e2dd Mon Sep 17 00:00:00 2001
From: surgan12
Date: Wed, 12 Dec 2018 17:58:17 +0530
Subject: [PATCH 3/4] mnist improved

---
 mnist/main.py | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/mnist/main.py b/mnist/main.py
index e3b7fc0beb..bf46e67dc5 100644
--- a/mnist/main.py
+++ b/mnist/main.py
@@ -6,24 +6,27 @@
 import torch.optim as optim
 from torchvision import datasets, transforms
 
+
 class Net(nn.Module):
     def __init__(self):
         super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
-        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
-        self.conv2_drop = nn.Dropout2d()
-        self.fc1 = nn.Linear(320, 50)
-        self.fc2 = nn.Linear(50, 10)
+        self.conv1 = nn.Conv2d(1, 20, 5, 1)
+        self.conv2 = nn.Conv2d(20, 50, 5, 1)
+        self.fc1 = nn.Linear(4*4*50, 500)
+        self.fc2 = nn.Linear(500, 10)
 
     def forward(self, x):
-        x = F.relu(F.max_pool2d(self.conv1(x), 2))
-        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
-        x = x.view(-1, 320)
+        x = F.relu(self.conv1(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = F.relu(self.conv2(x))
+        x = F.max_pool2d(x, 2, 2)
+        x = x.view(-1, 4*4*50)
         x = F.relu(self.fc1(x))
-        x = F.dropout(x, training=self.training)
         x = self.fc2(x)
         return F.log_softmax(x, dim=1)
-
+
+    def name(self):
+        return "Net"
 def train(args, model, device, train_loader, optimizer, epoch):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
@@ -51,6 +54,7 @@
             correct += pred.eq(target.view_as(pred)).sum().item()
 
     test_loss /= len(test_loader.dataset)
+
     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
         test_loss, correct, len(test_loader.dataset),
         100. * correct / len(test_loader.dataset)))
@@ -74,6 +78,9 @@ def main():
                         help='random seed (default: 1)')
     parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                         help='how many batches to wait before logging training status')
+
+    parser.add_argument('--save-model', action='store_true', default=False,
+                        help='For Saving the current Model')
     args = parser.parse_args()
     use_cuda = not args.no_cuda and torch.cuda.is_available()
@@ -100,10 +107,14 @@
     model = Net().to(device)
     optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
 
-    for epoch in range(1, args.epochs + 1):
+    for epoch in range(1, args.epochs+1):
         train(args, model, device, train_loader, optimizer, epoch)
         test(args, model, device, test_loader)
 
+    if (args.save_model):
+        PATH="mnist_cnn.pt"
+        torch.save(model.state_dict(), PATH)
+        print("model saved as mnist_cnn.pt in the current working directory\n")
 
 if __name__ == '__main__':
     main()
\ No newline at end of file

From adcd03c635fce2db6bbc0ac7b5c9dcc5b805cc1b Mon Sep 17 00:00:00 2001
From: surgan12 <33121121+surgan12@users.noreply.github.com>
Date: Thu, 13 Dec 2018 11:33:52 +0530
Subject: [PATCH 4/4] Update main.py

---
 mnist/main.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/mnist/main.py b/mnist/main.py
index bf46e67dc5..e971f06a76 100644
--- a/mnist/main.py
+++ b/mnist/main.py
@@ -25,8 +25,6 @@ def forward(self, x):
         x = self.fc2(x)
         return F.log_softmax(x, dim=1)
 
-    def name(self):
-        return "Net"
 def train(args, model, device, train_loader, optimizer, epoch):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
@@ -107,14 +105,12 @@ def main():
     model = Net().to(device)
     optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
 
-    for epoch in range(1, args.epochs+1):
+    for epoch in range(1, args.epochs + 1):
         train(args, model, device, train_loader, optimizer, epoch)
         test(args, model, device, test_loader)
 
     if (args.save_model):
-        PATH="mnist_cnn.pt"
-        torch.save(model.state_dict(), PATH)
-        print("model saved as mnist_cnn.pt in the current working directory\n")
-
+        torch.save(model.state_dict(),"mnist_cnn.pt")
+
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
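
Usage note (not part of the patches above; everything below is a sketch that assumes the patches have been applied): the MNIST option added to dcgan/main.py in PATCH 1/4 is selected with --dataset mnist together with the required --dataroot, and the --save-model flag added to mnist/main.py in PATCH 3/4 writes the trained weights to mnist_cnn.pt in the current working directory. A minimal sketch for reloading that checkpoint, assuming it is run from the mnist/ directory after "python main.py --save-model" has finished:

    # load_mnist_cnn.py -- hypothetical helper, not part of the patches
    import torch
    from main import Net  # mnist/main.py; importing is safe, its entry point is guarded by __name__ == '__main__'

    model = Net()                                      # same architecture as defined in PATCH 3/4
    model.load_state_dict(torch.load("mnist_cnn.pt"))  # checkpoint written when --save-model is passed
    model.eval()                                       # switch to inference mode before evaluation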