I am very new to pytorch so I need a bit of handholding. I am trying to re-use an old CNN classification model -- reusing the already trained convolutional layers as the encoder in an autoencoder and then training the decoder layers. The below code is what I have.
class Autoencoder(nn.Module):
    """Autoencoder built from a pretrained CNN classifier.

    The classifier's convolutional stack (``model.conv_layer``) is reused
    as a frozen encoder; a fresh stack of transposed convolutions forms
    the trainable decoder.

    Parameters
    ----------
    model : nn.Module
        Pretrained classifier whose ``conv_layer`` children become the
        encoder.
    specs : object
        Accepted for interface compatibility; the channel constants
        ``C0``..``C7`` and ``pooling`` used below come from the enclosing
        scope — TODO confirm whether they should be read from ``specs``.
    """

    def __init__(self, model, specs):
        super().__init__()
        # Reuse the pretrained convolutional layers as the encoder.
        self.encoder = nn.Sequential(*model.conv_layer.children())
        # Mirror decoder: transposed convolutions walking the channel
        # counts back down from C7 to 3 (presumably RGB output — verify).
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(in_channels=C7, out_channels=C6, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C6, out_channels=C5, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C5, out_channels=C4, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C4, out_channels=C3, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C3, out_channels=C2, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C2, out_channels=C1, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C1, out_channels=C0, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=C0, out_channels=3, kernel_size=pooling, padding=0),
            nn.ReLU(inplace=True),
        )
        # Freeze the pretrained encoder so only the decoder is trained.
        for param in self.encoder.parameters():
            param.requires_grad = False
        # He (Kaiming) initialization for the decoder's weight tensors;
        # dim() > 1 skips the 1-D bias vectors, which kaiming_normal_
        # cannot initialize.
        for p in self.decoder.parameters():
            if p.dim() > 1:
                nn.init.kaiming_normal_(p)

    # Fix for the reported NotImplementedError: in the original paste,
    # `forward` was indented inside `__init__`, so nn.Module's default
    # forward() ran instead and raised. It must be a class-level method.
    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
However, I am getting a "NotImplementedError". What am I doing wrong? When I instantiate that class, I pass in the pretrained CNN classification model, and self.encoder should take care of extracting the layers I am interested in from the model (those in conv_layer). When I run:
model = pretrainedCNNmodel
autoencoder = Autoencoder(model, specs)
print(autoencoder)
the print looks okay, it has all layers and everything I am hoping for, but when I try to train on it I get the "NotImplementedError:".
Here is the entire error:
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-20-9adc467b2472> in <module>()
2 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=L2_lambda)
3
----> 4 train(x, train_loader, test_loader, optimizer, criterion)
2 frames
<ipython-input-5-b25edb14cf5f> in train(model, train_loader, test_loader, optimizer, criterion)
15 data, target = data.cuda(), target.cuda()
16 optimizer.zero_grad()
---> 17 output = model(data)
18 loss = criterion(output, target)
19 loss.backward()
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
530 result = self._slow_forward(*input, **kwargs)
531 else:
--> 532 result = self.forward(*input, **kwargs)
533 for hook in self._forward_hooks.values():
534 hook_result = hook(self, input, result)
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in forward(self, *input)
94 registered hooks while the latter silently ignores them.
95 """
---> 96 raise NotImplementedError
97
98 def register_buffer(self, name, tensor):
NotImplementedError:
Since you have a bounty on this question, it cannot be closed. However, the exact same question was already asked and answered in this thread.
Basically, you have an indentation problem in your code: Your forward method is indented such that it is inside your __init__ method, instead of being part of the Autoencoder class.
Please see my other answer for more details.
If you found this helpful, you can donate via PayPal or buy me a coffee so we can maintain and grow. Thank you!
Donate Us With