Complex nonlinear optimization in PyTorch

I am trying to fit a rational function to complex data using PyTorch. I can compute the loss, but it does not decrease during optimization. I am still new to PyTorch, so any help is appreciated. I have already done this successfully with scipy.optimize.minimize; my goal is to see whether I can use PyTorch for the same thing. My example is below:
import numpy as np
import torch
from torch import nn
from copy import copy
F = torch.tensor([8, 25, 42, 59, 76, 93, 127, 161, 246, 314, 399, 501, 637, 788, 993, 1252,
1585, 1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000, 12589, 15849, 19953, 25119, 31623, 39811, 50119,
63096, 79433, 100000, 125893, 158490, 199527, 251189, 316228, 398108, 501188, 630958, 794329, 1000000])
Y = torch.tensor([2.7820e-05+1.9965e-05j, 4.4625e-05+2.9172e-05j, 5.4679e-05+3.3440e-05j,
6.1465e-05+3.6670e-05j, 6.7193e-05+3.8804e-05j, 7.1745e-05+4.1246e-05j,
7.8491e-05+4.4649e-05j, 8.4303e-05+4.7946e-05j, 9.4247e-05+5.5973e-05j,
9.9564e-05+6.2098e-05j, 1.0543e-04+6.8537e-05j, 1.1094e-04+7.7572e-05j,
1.1712e-04+8.9025e-05j, 1.2216e-04+1.0136e-04j, 1.2858e-04+1.1761e-04j,
1.3547e-04+1.3883e-04j, 1.4320e-04+1.6582e-04j, 1.5198e-04+1.9882e-04j,
1.6214e-04+2.3993e-04j, 1.7473e-04+2.9009e-04j, 1.9064e-04+3.5326e-04j,
2.1126e-04+4.3044e-04j, 2.3898e-04+5.2610e-04j, 2.7717e-04+6.4262e-04j,
3.2993e-04+7.8392e-04j, 4.0355e-04+9.5308e-04j, 5.0546e-04+1.1531e-03j,
6.4983e-04+1.3836e-03j, 8.4780e-04+1.6383e-03j, 1.1141e-03+1.9142e-03j,
1.4616e-03+2.1828e-03j, 1.8944e-03+2.4220e-03j, 2.4044e-03+2.6006e-03j,
2.9653e-03+2.6870e-03j, 3.5370e-03+2.6675e-03j, 4.0787e-03+2.5499e-03j,
4.5529e-03+2.3500e-03j, 4.9540e-03+2.1088e-03j, 5.2742e-03+1.8576e-03j,
5.5202e-03+1.5838e-03j, 5.7254e-03+1.3473e-03j, 5.8689e-03+1.1367e-03j,
5.9645e-03+9.5294e-04j, 6.0288e-03+7.7854e-04j, 6.1126e-03+6.4864e-04j])
sigma_Y = torch.tensor([2.6392e-08, 4.1651e-08, 5.0401e-08, 5.6586e-08, 6.1554e-08, 6.5615e-08,
7.2068e-08, 7.7146e-08, 8.6435e-08, 9.1971e-08, 9.7485e-08, 1.0291e-07,
1.0886e-07, 1.1439e-07, 1.2080e-07, 1.2778e-07, 1.3563e-07, 1.4389e-07,
1.5327e-07, 1.6397e-07, 1.7726e-07, 1.9387e-07, 2.1571e-07, 2.4450e-07,
2.8238e-07, 3.3201e-07, 3.9591e-07, 4.7788e-07, 5.7940e-07, 7.0818e-07,
8.6534e-07, 1.0613e-06, 1.3125e-06, 1.6464e-06, 2.1095e-06, 2.7707e-06,
3.7069e-06, 5.0162e-06, 6.7130e-06, 8.6371e-06, 1.0583e-05, 1.2018e-05,
1.2690e-05, 1.2639e-05, 1.2236e-05])
# rational function
def pade_approx(x, *p):
    norder = int((len(p)-1)/2)
    a = torch.tensor(p)[0:norder+1]
    b = torch.concat([torch.tensor(p)[norder+1:2*norder+1], torch.tensor([1])], axis=0)
    Ypa = np.polyval(a, np.sqrt(1j*x)) / np.polyval(b, np.sqrt(1j*x))
    return np.hstack([Ypa.real, Ypa.imag])
class Model(nn.Module):
    """Custom Pytorch model for gradient optimization."""
    def __init__(self, p, x, y, yerr):
        super().__init__()
        # initialize weights with random numbers
        # make weights torch parameters
        self.p = nn.Parameter(p)
        self.x = x
        self.y = y
        self.yerr = yerr
        self.ndata = len(x)
        self.npar = len(p)

    # loss function
    def obj_fun(self):
        dof = (2*self.ndata - self.npar)
        y_concat = torch.concat([self.y.real, self.y.imag], dim=0)
        sigma = torch.concat([self.yerr, self.yerr], dim=0)
        y_model = pade_approx(self.x, *self.p)
        chi_sqr = (1/dof)*(torch.sum(torch.abs((1/sigma**2) * (y_concat - y_model)**2)))
        return (torch.tensor(chi_sqr, requires_grad=True))
def training_loop(model, optimizer, n=1000):
    "Training loop for torch model."
    losses = []
    for i in range(n):
        optimizer.zero_grad()
        loss = model.obj_fun()
        print(loss)
        loss.backward()
        optimizer.step()
        losses.append(loss.clone())
    return losses
order = 5
# make initial parameters
p0 = torch.ones(2*order+1)
# instantiate model
m = Model(p0, F, Y, sigma_Y)
# Instantiate optimizer
opt = torch.optim.Adam(m.parameters(), lr=0.1)
losses = training_loop(m, opt)
#Output when I print losses
#c:\Users\richinex\miniconda3\envs\torch\lib\site-packages\ipykernel_launcher.py:23: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
#Output exceeds the size limit. Open the full output data in a text editor
#tensor(5.8691e+13, requires_grad=True)
#tensor(5.8691e+13, requires_grad=True)
#tensor(5.8691e+13, requires_grad=True)
#tensor(5.8691e+13, requires_grad=True)...
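The loss stays constant because the computation graph never reaches the parameters: pade_approx drops into NumPy (np.polyval, np.sqrt) and obj_fun re-wraps the result with torch.tensor(chi_sqr, requires_grad=True), which creates a new leaf tensor detached from self.p (this is also what the UserWarning above hints at), so loss.backward() produces no gradients for the parameters and Adam has nothing to step on. A torch-only version of the model function and objective that keeps everything inside autograd might look like the following minimal sketch (function names are illustrative, not part of the original code):
import torch

def polyval_torch(coeffs, x):
    # Horner's rule, highest-order coefficient first (same convention as np.polyval)
    result = torch.zeros_like(x)
    for c in coeffs:
        result = result * x + c
    return result

def pade_approx_torch(x, p):
    norder = (len(p) - 1) // 2
    a = p[:norder + 1]
    b = torch.cat([p[norder + 1:2*norder + 1], torch.ones(1)])
    s = torch.sqrt(1j * x.to(torch.cdouble))      # complex sqrt, stays inside autograd
    ypa = polyval_torch(a, s) / polyval_torch(b, s)
    return torch.cat([ypa.real, ypa.imag])

def obj_fun(p, x, y, yerr):
    dof = 2*len(x) - len(p)
    y_concat = torch.cat([y.real, y.imag])
    sigma = torch.cat([yerr, yerr])
    y_model = pade_approx_torch(x, p)
    # return the loss tensor directly -- no torch.tensor(...) re-wrapping
    return torch.sum(torch.abs((y_concat - y_model)**2 / sigma**2)) / dof
With the objective written this way, Model.obj_fun can simply return chi_sqr instead of torch.tensor(chi_sqr, requires_grad=True); loss.backward() then fills self.p.grad and the printed loss should start decreasing.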

Related

Visualisation with GradCam from Monai expects larger input image

I want to use GradCAM to visualize which parts of 3D MRI images were crucial for the classification into healthy and ill.
Therefore, I use
cam = GradCAM(nn_module=densenet, target_layers="class_layers.relu")
result = cam(x=torch.rand((1, 1, 7, 7, 7)))
where my densenet is defined as:
self.densenet = densenet.densenet121(spatial_dims=3, in_channels=1,
out_channels=1)
this throws the error:
RuntimeError: input image (T: 1 H: 1 W: 1) smaller than kernel size
(kT: 2 kH: 2 kW: 2)
Raising H,W,D to 30
result = cam(x=torch.rand((1, 1, 30, 30, 30)))
leads to
Traceback (most recent call last):
  File "/var/folders/79/z7g43_0x08g2yj7w6lb_j5280000gn/T/ipykernel_3133/4028907207.py", line 1, in <module>
    result = cam(x=torch.rand((64, 1, 30, 30, 30))) #result mri image nehmen
  File "/Users/Wu/opt/anaconda3/lib/python3.9/site-packages/monai/visualize/class_activation_maps.py", line 380, in __call__
    acti_map = self.compute_map(x, class_idx=class_idx, retain_graph=retain_graph, layer_idx=layer_idx)
  File "/Users/Wu/opt/anaconda3/lib/python3.9/site-packages/monai/visualize/class_activation_maps.py", line 360, in compute_map
    _, acti, grad = self.nn_module(x, class_idx=class_idx, retain_graph=retain_graph)
  File "/Users/Wu/opt/anaconda3/lib/python3.9/site-packages/monai/visualize/class_activation_maps.py", line 135, in __call__
    acti = tuple(self.activations[layer] for layer in self.target_layers)
  File "/Users/Wu/opt/anaconda3/lib/python3.9/site-packages/monai/visualize/class_activation_maps.py", line 135, in <genexpr>
    acti = tuple(self.activations[layer] for layer in self.target_layers)
KeyError: 'class_layers.relu'
And using my own densenet class:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 23:23:24 2022
#author: Wu
"""
###methods from pytorch lightening
import torch
import pytorch_lightning as pl
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
import torch.nn.functional as F
from torch import nn
import monai.networks.nets.densenet as densenet
import pycm
import numpy as np
class DenseNet(pl.LightningModule):
    def __init__(self, learning_rate=1e-4):
        super().__init__()
        self.densenet = densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=1)
        self.learning_rate = learning_rate
        self.class_loss = torch.nn.BCEWithLogitsLoss(reduction='mean')  # binary cross-entropy (BCE) loss
        print('1')

    def forward(self, x):
        logits = self.densenet(x)
        print('2')
        #float(logits.float())
        #logits = logits.float()
        print(logits.dtype)
        return logits

    def configure_optimizers(self):
        print('3')
        optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
        return optimizer

    def on_train_epoch_start(self):
        print('4')
        self.train_predictions = []
        self.train_labels = []

    def training_step(self, batch, batch_idx):
        print('5')
        mri, y = batch['mri'], batch['label']
        #y = y.type_as(logits)
        #float(y.float())
        y = y.float()
        print(y.dtype)
        logits = self(mri)
        #float(logits.float())
        logits = logits.float()
        y_hat = (logits >= 0).float()
        class_loss = self.class_loss(logits, y.unsqueeze(0))
        self.log('loss/train', class_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.train_predictions.extend(y_hat.tolist())
        self.train_labels.extend(y.tolist())
        return class_loss

    def on_train_epoch_end(self, outputs):
        print('6')
        cm = pycm.ConfusionMatrix(actual_vector=self.train_labels, predict_vector=self.train_predictions)
        self.log('accuracy/train', cm.Overall_ACC)
        if cm.F1_Macro != 'None':
            self.log('f1_macro/train', cm.F1_Macro)
        if cm.F1_Micro != 'None':
            self.log('f1_micro/train', cm.F1_Micro)
        #print(cm)

    def on_validation_epoch_start(self):
        print('7')
        self.predictions = []
        self.labels = []

    def validation_step(self, batch, batch_idx):
        print('8')
        mri, y = batch['mri'], batch['label']
        logits = self(mri)
        float(y.float())
        y = y.float()
        print(y.dtype)
        float(logits.float())
        logits = logits.float()
        y_hat = (logits >= 0).float()
        class_loss = self.class_loss(logits, y.unsqueeze(0))
        self.log('loss/valid', class_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.predictions.extend(y_hat.tolist())
        self.labels.extend(y.tolist())

    def on_validation_epoch_end(self):
        print('9')
        cm = pycm.ConfusionMatrix(actual_vector=self.labels, predict_vector=self.predictions)
        self.log('accuracy/valid', cm.Overall_ACC)
        if cm.F1_Macro != 'None':
            self.log('f1_macro/valid', cm.F1_Macro)
        if cm.F1_Micro != 'None':
            self.log('f1_micro/valid', cm.F1_Micro)
        #print(cm)
The output of densenet.state_dict().keys() is shown below (truncated), and there is no class_layers.relu in it.
What would be the equivalent layer?
densenet.state_dict().keys()
...
'densenet.features.denseblock4.denselayer1.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer1.layers.conv1.weight',
'densenet.features.denseblock4.denselayer1.layers.norm2.weight',
'densenet.features.denseblock4.denselayer1.layers.norm2.bias',
'densenet.features.denseblock4.denselayer1.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer1.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer1.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer1.layers.conv2.weight',
'densenet.features.denseblock4.denselayer2.layers.norm1.weight',
'densenet.features.denseblock4.denselayer2.layers.norm1.bias',
'densenet.features.denseblock4.denselayer2.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer2.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer2.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer2.layers.conv1.weight',
'densenet.features.denseblock4.denselayer2.layers.norm2.weight',
'densenet.features.denseblock4.denselayer2.layers.norm2.bias',
'densenet.features.denseblock4.denselayer2.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer2.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer2.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer2.layers.conv2.weight',
'densenet.features.denseblock4.denselayer3.layers.norm1.weight',
'densenet.features.denseblock4.denselayer3.layers.norm1.bias',
'densenet.features.denseblock4.denselayer3.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer3.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer3.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer3.layers.conv1.weight',
'densenet.features.denseblock4.denselayer3.layers.norm2.weight',
'densenet.features.denseblock4.denselayer3.layers.norm2.bias',
'densenet.features.denseblock4.denselayer3.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer3.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer3.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer3.layers.conv2.weight',
'densenet.features.denseblock4.denselayer4.layers.norm1.weight',
'densenet.features.denseblock4.denselayer4.layers.norm1.bias',
'densenet.features.denseblock4.denselayer4.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer4.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer4.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer4.layers.conv1.weight',
'densenet.features.denseblock4.denselayer4.layers.norm2.weight',
'densenet.features.denseblock4.denselayer4.layers.norm2.bias',
'densenet.features.denseblock4.denselayer4.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer4.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer4.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer4.layers.conv2.weight',
'densenet.features.denseblock4.denselayer5.layers.norm1.weight',
'densenet.features.denseblock4.denselayer5.layers.norm1.bias',
'densenet.features.denseblock4.denselayer5.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer5.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer5.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer5.layers.conv1.weight',
'densenet.features.denseblock4.denselayer5.layers.norm2.weight',
'densenet.features.denseblock4.denselayer5.layers.norm2.bias',
'densenet.features.denseblock4.denselayer5.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer5.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer5.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer5.layers.conv2.weight',
'densenet.features.denseblock4.denselayer6.layers.norm1.weight',
'densenet.features.denseblock4.denselayer6.layers.norm1.bias',
'densenet.features.denseblock4.denselayer6.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer6.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer6.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer6.layers.conv1.weight',
'densenet.features.denseblock4.denselayer6.layers.norm2.weight',
'densenet.features.denseblock4.denselayer6.layers.norm2.bias',
'densenet.features.denseblock4.denselayer6.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer6.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer6.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer6.layers.conv2.weight',
'densenet.features.denseblock4.denselayer7.layers.norm1.weight',
'densenet.features.denseblock4.denselayer7.layers.norm1.bias',
'densenet.features.denseblock4.denselayer7.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer7.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer7.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer7.layers.conv1.weight',
'densenet.features.denseblock4.denselayer7.layers.norm2.weight',
'densenet.features.denseblock4.denselayer7.layers.norm2.bias',
'densenet.features.denseblock4.denselayer7.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer7.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer7.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer7.layers.conv2.weight',
'densenet.features.denseblock4.denselayer8.layers.norm1.weight',
'densenet.features.denseblock4.denselayer8.layers.norm1.bias',
'densenet.features.denseblock4.denselayer8.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer8.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer8.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer8.layers.conv1.weight',
'densenet.features.denseblock4.denselayer8.layers.norm2.weight',
'densenet.features.denseblock4.denselayer8.layers.norm2.bias',
'densenet.features.denseblock4.denselayer8.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer8.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer8.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer8.layers.conv2.weight',
'densenet.features.denseblock4.denselayer9.layers.norm1.weight',
'densenet.features.denseblock4.denselayer9.layers.norm1.bias',
'densenet.features.denseblock4.denselayer9.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer9.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer9.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer9.layers.conv1.weight',
'densenet.features.denseblock4.denselayer9.layers.norm2.weight',
'densenet.features.denseblock4.denselayer9.layers.norm2.bias',
'densenet.features.denseblock4.denselayer9.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer9.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer9.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer9.layers.conv2.weight',
'densenet.features.denseblock4.denselayer10.layers.norm1.weight',
'densenet.features.denseblock4.denselayer10.layers.norm1.bias',
'densenet.features.denseblock4.denselayer10.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer10.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer10.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer10.layers.conv1.weight',
'densenet.features.denseblock4.denselayer10.layers.norm2.weight',
'densenet.features.denseblock4.denselayer10.layers.norm2.bias',
'densenet.features.denseblock4.denselayer10.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer10.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer10.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer10.layers.conv2.weight',
'densenet.features.denseblock4.denselayer11.layers.norm1.weight',
'densenet.features.denseblock4.denselayer11.layers.norm1.bias',
'densenet.features.denseblock4.denselayer11.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer11.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer11.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer11.layers.conv1.weight',
'densenet.features.denseblock4.denselayer11.layers.norm2.weight',
'densenet.features.denseblock4.denselayer11.layers.norm2.bias',
'densenet.features.denseblock4.denselayer11.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer11.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer11.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer11.layers.conv2.weight',
'densenet.features.denseblock4.denselayer12.layers.norm1.weight',
'densenet.features.denseblock4.denselayer12.layers.norm1.bias',
'densenet.features.denseblock4.denselayer12.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer12.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer12.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer12.layers.conv1.weight',
'densenet.features.denseblock4.denselayer12.layers.norm2.weight',
'densenet.features.denseblock4.denselayer12.layers.norm2.bias',
'densenet.features.denseblock4.denselayer12.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer12.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer12.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer12.layers.conv2.weight',
'densenet.features.denseblock4.denselayer13.layers.norm1.weight',
'densenet.features.denseblock4.denselayer13.layers.norm1.bias',
'densenet.features.denseblock4.denselayer13.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer13.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer13.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer13.layers.conv1.weight',
'densenet.features.denseblock4.denselayer13.layers.norm2.weight',
'densenet.features.denseblock4.denselayer13.layers.norm2.bias',
'densenet.features.denseblock4.denselayer13.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer13.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer13.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer13.layers.conv2.weight',
'densenet.features.denseblock4.denselayer14.layers.norm1.weight',
'densenet.features.denseblock4.denselayer14.layers.norm1.bias',
'densenet.features.denseblock4.denselayer14.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer14.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer14.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer14.layers.conv1.weight',
'densenet.features.denseblock4.denselayer14.layers.norm2.weight',
'densenet.features.denseblock4.denselayer14.layers.norm2.bias',
'densenet.features.denseblock4.denselayer14.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer14.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer14.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer14.layers.conv2.weight',
'densenet.features.denseblock4.denselayer15.layers.norm1.weight',
'densenet.features.denseblock4.denselayer15.layers.norm1.bias',
'densenet.features.denseblock4.denselayer15.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer15.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer15.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer15.layers.conv1.weight',
'densenet.features.denseblock4.denselayer15.layers.norm2.weight',
'densenet.features.denseblock4.denselayer15.layers.norm2.bias',
'densenet.features.denseblock4.denselayer15.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer15.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer15.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer15.layers.conv2.weight',
'densenet.features.denseblock4.denselayer16.layers.norm1.weight',
'densenet.features.denseblock4.denselayer16.layers.norm1.bias',
'densenet.features.denseblock4.denselayer16.layers.norm1.running_mean',
'densenet.features.denseblock4.denselayer16.layers.norm1.running_var',
'densenet.features.denseblock4.denselayer16.layers.norm1.num_batches_tracked',
'densenet.features.denseblock4.denselayer16.layers.conv1.weight',
'densenet.features.denseblock4.denselayer16.layers.norm2.weight',
'densenet.features.denseblock4.denselayer16.layers.norm2.bias',
'densenet.features.denseblock4.denselayer16.layers.norm2.running_mean',
'densenet.features.denseblock4.denselayer16.layers.norm2.running_var',
'densenet.features.denseblock4.denselayer16.layers.norm2.num_batches_tracked',
'densenet.features.denseblock4.denselayer16.layers.conv2.weight',
'densenet.features.norm5.weight', 'densenet.features.norm5.bias',
'densenet.features.norm5.running_mean',
'densenet.features.norm5.running_var',
'densenet.features.norm5.num_batches_tracked',
'densenet.class_layers.out.weight', 'densenet.class_layers.out.bias'])
You may need to pay attention to the usage of the imports; the following code should help you.
import torch
from monai.networks.nets import DenseNet121
from monai.visualize import GradCAM
model = DenseNet121(spatial_dims=3, in_channels=1, out_channels=1)
cam = GradCAM(nn_module=model, target_layers="class_layers.relu")
result = cam(x=torch.rand(1, 1, 64, 64, 64))
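If you keep your own LightningModule wrapper, note that every layer name gets the attribute prefix densenet. (which is also visible in the state_dict keys above), and that class_layers.relu has no parameters, so it never appears in state_dict() even though it exists as a module. A quick way to find a valid target_layers value is to print named_modules(); a sketch, assuming the DenseNet LightningModule defined above (the exact names can vary with the MONAI version):
import torch
from monai.visualize import GradCAM

model = DenseNet()                      # the LightningModule defined above
for name, _ in model.named_modules():   # lists e.g. 'densenet.class_layers.relu'
    print(name)
cam = GradCAM(nn_module=model, target_layers="densenet.class_layers.relu")
result = cam(x=torch.rand(1, 1, 64, 64, 64))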

How to map a function to a pre-batched ('BatchDataset') tensor in Tensorflow

I am making data windows (input, output pairs of windows) from time series data. I have already converted my time series to a tf dataset, where each batch has the number of time steps equal to the total window sizes I need.
def make_dataset(data=train_df[0]):
    ds = tf.keras.preprocessing.timeseries_dataset_from_array(
        data=data,
        targets=None,
        sequence_length=total_window_size,
        sequence_stride=1,
        shuffle=True,
        batch_size=32
    )
    return ds
Example of the shape returned:
for example in tensor.take(1):
    print(f'shape: {example.shape}')
shape: (32, 48, 18)
What I need to do now is split the time dimension into my input-output pairs. I have a function to do this; however, when I try to map this function to my 'ds' in the above function I get the following error:
'BatchDataset' object is not subscriptable
I am hoping someone can help me understand where I am going wrong; I am pretty new to tensorflow. My code is below; in this example 'input_slice' and 'labels_slice' are 0 and 24 respectively, so my aim is to split my batches into input-output pairs of length 24 each.
def split_window(features):
    inputs = features[:, input_slice, :]
    labels = features[:, labels_slice, :]
    inputs.set_shape([None, input_width, None])
    labels.set_shape([None, label_width, None])
    return inputs, labels

def make_dataset(data=train_df[0]):
    data = np.array(data, dtype=np.float32)
    ds = tf.keras.preprocessing.timeseries_dataset_from_array(
        data=data,
        targets=None,
        sequence_length=total_window_size,
        sequence_stride=1,
        shuffle=True,
        batch_size=32
    )
    ds = ds.map(split_window(ds))
    return ds
tensor = make_dataset()
tensor
'BatchDataset' object is not subscriptable
Your snippet of code looks similar to the tutorial of Time Series in Tensorflow. Based on that, I modified the main class WindowGenerator() (excluded the parts of train/val/test datasets and output-labels selection) to a simpler class suitable to your question.
class WindowGenerator():
    def __init__(self, input_width, label_width, shift):
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]

    def split_window(self, features):
        inputs = features[:, self.input_slice, :]
        labels = features[:, self.labels_slice, :]
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, None])
        return inputs, labels

    def make_dataset(self, data):
        data = np.array(data, dtype=np.float32)
        ds = tf.keras.utils.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self.total_window_size,
            sequence_stride=1,
            shuffle=True,
            batch_size=batch_size,
        )
        ds = ds.map(self.split_window)
        return ds
input_width=24
label_width=24
total_width = input_width + label_width
batch_size = 32
window = WindowGenerator(input_width=input_width, label_width=label_width, shift=1)
dataset = window.make_dataset(train_df[0])
I would recommend using Dataset.window(), though. It is simpler and more intuitive.
dataset = tf.data.Dataset.from_tensor_slices(train_df[0])
dataset = dataset.window(total_width, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(total_width))  # one (total_width, n_features) tensor per window
dataset = dataset.map(lambda window: (window[:input_width], window[-label_width:]))
dataset = dataset.batch(batch_size)
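Either pipeline can be sanity-checked by pulling a single batch and printing its shapes (a quick check, assuming train_df[0] has shape (time_steps, n_features)):
for inputs, labels in dataset.take(1):
    print(inputs.shape)   # (batch_size, input_width, n_features), e.g. (32, 24, 18)
    print(labels.shape)   # (batch_size, label_width, n_features), e.g. (32, 24, 18)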

RuntimeError in Pytorch when increasing batch size to more than 1

This code for my custom data loader runs smoothly with batch_size=1, but when I increase batch size I get the following Error:
RuntimeError: Expected object of scalar type Double but got scalar type Long for sequence element 1 in sequence argument at position #1 'tensors'
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
import os, h5py
import PIL
#------------------------------
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
#------------------------------
from data_augmentation import *
#------------------------------
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
class NiftiDataset(Dataset):
    def __init__(self, transformation_params, data_path, mode='train', transforms=None):
        """
        Parameters:
            data_path (string): Root directory of the preprocessed dataset.
            mode (string, optional): Select the image_set to use, ``train``, ``valid``
            transforms (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.data_path = data_path
        self.mode = mode
        self.images = []
        self.labels = []
        self.W_maps = []
        self.centers = []
        self.radiuss = []
        self.pixel_spacings = []
        self.transformation_params = transformation_params
        self.transforms = transforms
        #-------------------------------------------------------------------------------------
        if self.mode == 'train':
            self.data_path = os.path.join(self.data_path, 'train_set')
        elif self.mode == 'valid':
            self.data_path = os.path.join(self.data_path, 'validation_set')
        #-------------------------------------------------------------------------------------
        for _, _, f in os.walk(self.data_path):
            for file in f:
                hdf_file = os.path.join(self.data_path, file)
                data = h5py.File(hdf_file, 'r')  # Dictionary
                # Preprocessing of Input Image and Label
                patch_img, patch_gt, patch_wmap = PreProcessData(file, data, self.mode, self.transformation_params)
                #print(type(data))
                self.images.append(patch_img)    # 2D image
                #print('image shape is : ', patch_img.shape)
                self.labels.append(patch_gt)     # 2D label
                #print('label shape is : ', patch_gt.shape)
                self.W_maps.append(patch_wmap)   # Weight_Map
                # self.centers.append(data['roi_center'][:])           # [x,y]
                # self.radiuss.append(data['roi_radii'][:])            # [R_min, R_max]
                # self.pixel_spacings.append(data['pixel_spacing'][:]) # [x, y, z]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        image = self.images[index]
        label = self.labels[index]
        W_map = self.W_maps[index]
        if self.transforms is not None:
            image, label, W_map = self.transforms(image, label, W_map)
        return image, label, W_map
#=================================================================================================
if __name__ == '__main__':
    # Test routine to check your threaded dataloader
    # ACDC dataset has 4 labels
    n_labels = 4
    path = './hdf5_files'
    batch_size = 1
    # Data Augmentation Parameters
    # Set patch extraction parameters
    size1 = (128, 128)
    patch_size = size1
    mm_patch_size = size1
    max_size = size1
    train_transformation_params = {
        'patch_size': patch_size,
        'mm_patch_size': mm_patch_size,
        'add_noise': ['gauss', 'none1', 'none2'],
        'rotation_range': (-5, 5),
        'translation_range_x': (-5, 5),
        'translation_range_y': (-5, 5),
        'zoom_range': (0.8, 1.2),
        'do_flip': (False, False),
    }
    valid_transformation_params = {
        'patch_size': patch_size,
        'mm_patch_size': mm_patch_size}
    transformation_params = {'train': train_transformation_params,
                             'valid': valid_transformation_params,
                             'n_labels': 4,
                             'data_augmentation': True,
                             'full_image': False,
                             'data_deformation': False,
                             'data_crop_pad': max_size}
    #====================================================================
    dataset = NiftiDataset(transformation_params=transformation_params, data_path=path, mode='train')
    dataloader = DataLoader(dataset=dataset, batch_size=2, shuffle=True, num_workers=0)
    dataiter = iter(dataloader)
    data = dataiter.next()
    images, labels, W_map = data
    #===============================================================================
    # Data Visualization
    #===============================================================================
    print('image: ', images.shape, images.type(), 'label: ', labels.shape, labels.type(),
          'W_map: ', W_map.shape, W_map.type())
    img = transforms.ToPILImage()(images[0, 0, :, :, 0].float())
    lbl = transforms.ToPILImage()(labels[0, 0, :, :].float())
    W_mp = transforms.ToPILImage()(W_map[0, 0, :, :].float())
    plt.subplot(1, 3, 1)
    plt.imshow(img, cmap='gray', interpolation=None)
    plt.title('image')
    plt.subplot(1, 3, 2)
    plt.imshow(lbl, cmap='gray', interpolation=None)
    plt.title('label')
    plt.subplot(1, 3, 3)
    plt.imshow(W_mp, cmap='gray', interpolation=None)
    plt.title('Weight Map')
    plt.show()
I have noticed some strange things, such as the tensor types being different, even though the images, labels and weight maps are images of the same type and size.
The Error Traceback:
Traceback (most recent call last):
File "D:\Saudi_CV\Vibot\Smester_2\2_Medical Image analysis\Project_2020\OUR_Project\data_loader.py", line 118, in <module>
data = dataiter.next()
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 345, in __next__
data = self._next_data()
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\dataloader.py", line 385, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 79, in default_collate
return [default_collate(samples) for samples in transposed]
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 79, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 64, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "F:\Download_2019\Anaconda3\lib\site-packages\torch\utils\data\_utils\collate.py", line 55, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: Expected object of scalar type Double but got scalar type Long for sequence element 1 in sequence argument at position #1 'tensors'
[Finished in 19.9s with exit code 1]
The problem was solved through the solution explained on the linked page: convert each item to the same tensor type inside __getitem__.
image = torch.from_numpy(self.images[index]).type(torch.FloatTensor)
label = torch.from_numpy(self.labels[index]).type(torch.FloatTensor)
W_map = torch.from_numpy(self.W_maps[index]).type(torch.FloatTensor)
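For context, the error comes from default_collate: with batch_size > 1 it stacks the corresponding field of every sample in the batch with torch.stack, which requires one common dtype, so a Double array from one sample cannot be stacked with a Long array from another (with batch_size=1 there is nothing to stack against, which is why it worked). A minimal illustration of the failure, with hypothetical arrays:
import torch

sample_a = torch.zeros(3, dtype=torch.float64)  # the same field stored as float in one sample
sample_b = torch.zeros(3, dtype=torch.int64)    # ... and as int in another sample
torch.stack([sample_a, sample_b])               # raises a RuntimeError about mismatched scalar types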

Argument must be a string or a number issue, Not 'Type' - Pyspark

Update:
So I have been looking into the issue; the problem is with the scikit-multiflow DataStream. In the last quarter of the code, stream_clf.partial_fit(X, y, classes=stream.target_values) is called; the class values in stream.target_values should be numbers or strings, but the stream is returning a dtype/type object instead. When I print or loop over stream.target_values I get this:
I have tried to do conversions etc., but still to no avail. Can someone please help here?
Initial Problem
I am running some code (which took inspiration from here). It works perfectly fine in a vanilla Python environment.
But if I run this code, after certain modifications, in Apache Spark using PySpark, I get the following error:
TypeError: int() argument must be a string, a bytes-like object or a number, not 'type'
I have tried every possible way to trace the issue, but everything looks alright. The error arises from the last line of the code, where the Hoeffding tree is called for prediction. It expects an ndarray, and the type of the X variable is indeed ndarray. I am not sure what is triggering the issue. Can someone please help or point me to the right trace?
complete stack of error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-52-1310132c88db> in <module>
30 D3_win.addInstance(X,y)
31 xx = np.array(X,dtype='float64')
---> 32 y_hat = stream_clf.predict(xx)
33
34
~/conceptDrift/projectTest/lib/python3.5/site-packages/skmultiflow/trees/hoeffding_tree.py in predict(self, X)
1068 r, _ = get_dimensions(X)
1069 predictions = []
-> 1070 y_proba = self.predict_proba(X)
1071 for i in range(r):
1072 index = np.argmax(y_proba[i])
~/conceptDrift/projectTest/lib/python3.5/site-packages/skmultiflow/trees/hoeffding_tree.py in predict_proba(self, X)
1099 votes = normalize_values_in_dict(votes, inplace=False)
1100 if self.classes is not None:
-> 1101 y_proba = np.zeros(int(max(self.classes)) + 1)
1102 else:
1103 y_proba = np.zeros(int(max(votes.keys())) + 1)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'type'
Code
import findspark
findspark.init()
import pyspark as ps
import warnings
from pyspark.sql import functions as fn
import sys
from pyspark import SparkContext,SparkConf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score as AUC
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from skmultiflow.trees.hoeffding_tree import HoeffdingTree
from skmultiflow.data.data_stream import DataStream
import time
def drift_detector(S, T, threshold=0.75):
    T = pd.DataFrame(T)
    #print(T)
    S = pd.DataFrame(S)
    # Give slack variable in_target which is 1 for old and 0 for new
    T['in_target'] = 0  # in target set
    S['in_target'] = 1  # in source set
    # Combine source and target with new slack variable
    ST = pd.concat([T, S], ignore_index=True, axis=0)
    labels = ST['in_target'].values
    ST = ST.drop('in_target', axis=1).values
    # You can use any classifier for this step. We advise it to be a simple one as we want to see whether source
    # and target differ, not to classify them.
    clf = LogisticRegression(solver='liblinear')
    predictions = np.zeros(labels.shape)
    # Divide ST into two equal chunks
    # Train LR on a chunk and classify the other chunk
    # Calculate AUC for original labels (in_target) and predicted ones
    skf = StratifiedKFold(n_splits=2, shuffle=True)
    for train_idx, test_idx in skf.split(ST, labels):
        X_train, X_test = ST[train_idx], ST[test_idx]
        y_train, y_test = labels[train_idx], labels[test_idx]
        clf.fit(X_train, y_train)
        probs = clf.predict_proba(X_test)[:, 1]
        predictions[test_idx] = probs
    auc_score = AUC(labels, predictions)
    print(auc_score)
    # Signal drift if AUC is larger than the threshold
    if auc_score > threshold:
        return True
    else:
        return False
class D3():
    def __init__(self, w, rho, dim, auc):
        self.size = int(w*(1+rho))
        self.win_data = np.zeros((self.size, dim))
        self.win_label = np.zeros(self.size)
        self.w = w
        self.rho = rho
        self.dim = dim
        self.auc = auc
        self.drift_count = 0
        self.window_index = 0

    def addInstance(self, X, y):
        if (self.isEmpty()):
            self.win_data[self.window_index] = X
            self.win_label[self.window_index] = y
            self.window_index = self.window_index + 1
        else:
            print("Error: Buffer is full!")

    def isEmpty(self):
        return self.window_index < self.size

    def driftCheck(self):
        if drift_detector(self.win_data[:self.w], self.win_data[self.w:self.size], auc):  # returns true if drift is detected
            self.window_index = int(self.w * self.rho)
            self.win_data = np.roll(self.win_data, -1*self.w, axis=0)
            self.win_label = np.roll(self.win_label, -1*self.w, axis=0)
            self.drift_count = self.drift_count + 1
            return True
        else:
            self.window_index = self.w
            self.win_data = np.roll(self.win_data, -1*(int(self.w*self.rho)), axis=0)
            self.win_label = np.roll(self.win_label, -1*(int(self.w*self.rho)), axis=0)
            return False

    def getCurrentData(self):
        return self.win_data[:self.window_index]

    def getCurrentLabels(self):
        return self.win_label[:self.window_index]
def select_data(x):
    x = "/user/hadoop1/tellus/sea_1.csv"
    peopleDF = spark.read.csv(x, header=True)
    df = peopleDF.toPandas()
    scaler = MinMaxScaler()
    df.iloc[:, 0:df.shape[1]-1] = scaler.fit_transform(df.iloc[:, 0:df.shape[1]-1])
    return df

def check_true(y, y_hat):
    if (y == y_hat):
        return 1
    else:
        return 0
df = select_data("/user/hadoop1/tellus/sea_1.csv")
stream = DataStream(df)
stream.prepare_for_use()
stream_clf = HoeffdingTree()
w = int(2000)
rho = float(0.4)
auc = float(0.60)
# In[ ]:
D3_win = D3(w, rho, stream.n_features, auc)
stream_acc = []
stream_record = []
stream_true = 0
i = 0
start = time.time()
X, y = stream.next_sample(int(w*rho))
stream_clf.partial_fit(X, y, classes=stream.target_values)
while (stream.has_more_samples()):
    X, y = stream.next_sample()
    if D3_win.isEmpty():
        D3_win.addInstance(X, y)
        y_hat = stream_clf.predict(X)
The problem was with the select_data() function: the data types of the variables were being changed during execution. This issue is fixed now.
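The answer only names the cause, so here is a hedged sketch of the kind of fix that matches it: spark.read.csv(..., header=True) keeps every column as a string unless a schema is inferred, so the pandas frame handed to DataStream can end up with object columns and target_values that are not plain numbers or strings. Forcing numeric dtypes inside select_data (illustrative only; it reuses the spark, pd and MinMaxScaler objects from the code above) avoids that:
def select_data(path):
    # infer numeric column types instead of reading everything back as strings
    df = spark.read.csv(path, header=True, inferSchema=True).toPandas()
    df = df.apply(pd.to_numeric)          # make sure the features and the label column are numeric
    scaler = MinMaxScaler()
    df.iloc[:, 0:df.shape[1]-1] = scaler.fit_transform(df.iloc[:, 0:df.shape[1]-1])
    return df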

Trying to use Caffe classifier causes "sequence argument must have length equal to input rank" error

I am trying to use the caffe.Classifier class and its predict() method on my ImageNet-trained caffemodel.
Images were resized to 256x256 and crops of 227x227 were used to train the net.
Everything is simple and straightforward, yet I keep getting weird errors such as the following:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-7-3b440ebf1f6e> in <module>()
17 image_dims=(256, 256))
18
---> 19 out = net.predict([image_caffe], oversample=True)
20 print(labels[out[0].argmax()].strip(),' (', out[0][out[0].argmax()] , ')')
21 plabel = int(labels[out[0].argmax()].strip())
<ipython-input-5-e6ae1810b820> in predict(self, inputs, oversample)
65 for ix, in_ in enumerate(inputs):
66 print('image dims = ',self.image_dims[0],',',self.image_dims[1] ,'_in = ',in_.shape)
---> 67 input_[ix] = caffe.io.resize_image(in_, self.image_dims)
68
69 if oversample:
C:\Users\Master\Anaconda3\envs\anaconda35\lib\site-packages\caffe\io.py in resize_image(im, new_dims, interp_order)
335 # ndimage interpolates anything but more slowly.
336 scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
--> 337 resized_im = zoom(im, scale + (1,), order=interp_order)
338 return resized_im.astype(np.float32)
339
C:\Users\Master\Anaconda3\envs\anaconda35\lib\site-packages\scipy\ndimage\interpolation.py in zoom(input, zoom, output, order, mode, cval, prefilter)
588 else:
589 filtered = input
--> 590 zoom = _ni_support._normalize_sequence(zoom, input.ndim)
591 output_shape = tuple(
592 [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
C:\Users\Master\Anaconda3\envs\anaconda35\lib\site-packages\scipy\ndimage\_ni_support.py in _normalize_sequence(input, rank, array_type)
63 if len(normalized) != rank:
64 err = "sequence argument must have length equal to input rank"
---> 65 raise RuntimeError(err)
66 else:
67 normalized = [input] * rank
RuntimeError: sequence argument must have length equal to input rank
And here are the snippets of code I'm using:
import sys
import caffe
import numpy as np
import lmdb
import matplotlib.pyplot as plt
import itertools
def flat_shape(x):
    "Returns x without singleton dimension, eg: (1,28,28) -> (28,28)"
    return x.reshape(x.shape)

def db_reader(fpath, type='lmdb'):
    if type == 'lmdb':
        return lmdb_reader(fpath)
    else:
        return leveldb_reader(fpath)

def lmdb_reader(fpath):
    import lmdb
    lmdb_env = lmdb.open(fpath)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    for key, value in lmdb_cursor:
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum).astype(np.uint8)
        yield (key, flat_shape(image), label)

def leveldb_reader(fpath):
    import leveldb
    db = leveldb.LevelDB(fpath)
    for key, value in db.RangeIter():
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        label = int(datum.label)
        image = caffe.io.datum_to_array(datum).astype(np.uint8)
        yield (key, flat_shape(image), label)
Classifier class (copied from Caffe's python directory):
import numpy as np
import caffe
class Classifier(caffe.Net):
    """
    Classifier extends Net for image class prediction
    by scaling, center cropping, or oversampling.

    Parameters
    ----------
    image_dims : dimensions to scale input for cropping/sampling.
        Default is to scale to net input size for whole-image crop.
    mean, input_scale, raw_scale, channel_swap: params for
        preprocessing options.
    """
    def __init__(self, model_file, pretrained_file, image_dims=None,
                 mean=None, input_scale=None, raw_scale=None,
                 channel_swap=None):
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)

        # configure pre-processing
        in_ = self.inputs[0]
        print('inputs[0]', self.inputs[0])
        self.transformer = caffe.io.Transformer(
            {in_: self.blobs[in_].data.shape})
        self.transformer.set_transpose(in_, (2, 0, 1))
        if mean is not None:
            self.transformer.set_mean(in_, mean)
        if input_scale is not None:
            self.transformer.set_input_scale(in_, input_scale)
        if raw_scale is not None:
            self.transformer.set_raw_scale(in_, raw_scale)
        if channel_swap is not None:
            self.transformer.set_channel_swap(in_, channel_swap)
        print('crops: ', self.blobs[in_].data.shape[2:])
        self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
        if not image_dims:
            image_dims = self.crop_dims
        self.image_dims = image_dims

    def predict(self, inputs, oversample=True):
        """
        Predict classification probabilities of inputs.

        Parameters
        ----------
        inputs : iterable of (H x W x K) input ndarrays.
        oversample : boolean
            average predictions across center, corners, and mirrors
            when True (default). Center-only prediction when False.

        Returns
        -------
        predictions: (N x C) ndarray of class probabilities for N images and C
            classes.
        """
        # Scale to standardize input dimensions.
        input_ = np.zeros((len(inputs),
                           self.image_dims[0],
                           self.image_dims[1],
                           inputs[0].shape[2]),
                          dtype=np.float32)
        for ix, in_ in enumerate(inputs):
            print('image dims = ', self.image_dims[0], ',', self.image_dims[1], '_in = ', in_.shape)
            input_[ix] = caffe.io.resize_image(in_, self.image_dims)

        if oversample:
            # Generate center, corner, and mirrored crops.
            input_ = caffe.io.oversample(input_, self.crop_dims)
        else:
            # Take center crop.
            center = np.array(self.image_dims) / 2.0
            crop = np.tile(center, (1, 2))[0] + np.concatenate([
                -self.crop_dims / 2.0,
                self.crop_dims / 2.0
            ])
            input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]

        # Classify
        caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
                            dtype=np.float32)
        for ix, in_ in enumerate(input_):
            caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
        out = self.forward_all(**{self.inputs[0]: caffe_in})
        predictions = out[self.outputs[0]]

        # For oversampling, average predictions across crops.
        if oversample:
            predictions = predictions.reshape((len(predictions) / 10, 10, -1))
            predictions = predictions.mean(1)

        return predictions
Main section :
proto ='deploy.prototxt'
model='snap1.caffemodel'
mean='imagenet_mean.binaryproto'
db_path='G:/imagenet/ilsvrc12_val_lmdb'
# Extract mean from the mean image file
#mean_blobproto_new = caffe.proto.caffe_pb2.BlobProto()
#f = open(mean, 'rb')
#mean_blobproto_new.ParseFromString(f.read())
#mean_image = caffe.io.blobproto_to_array(mean_blobproto_new)
#f.close()
mu = np.load('mean.npy').mean(1).mean(1)
caffe.set_mode_gpu()
reader = lmdb_reader(db_path)
i = 0
for i, image, label in reader:
    image_caffe = image.reshape(1, *image.shape)
    print(image_caffe.shape, mu.shape)
    net = Classifier(proto, model,
                     mean=mu,
                     channel_swap=(2, 1, 0),
                     raw_scale=255,
                     image_dims=(256, 256))
    out = net.predict([image_caffe], oversample=True)
    print(i, labels[out[0].argmax()].strip(), ' (', out[0][out[0].argmax()], ')')
    i += 1
What is wrong here?
I found the cause: I had to feed the image in the form of a 3D tensor, not a 4D one!
So our 4D tensor:
image_caffe = image.reshape(1, *image.shape)
needed to be changed to a 3D one:
image_caffe = image.transpose(2,1,0)
As a side note, try using Python 2 for running anything Caffe-related. Python 3 might work at first but will definitely cause a lot of headaches; for instance, the predict method with oversample set to True will crash under Python 3 but works just fine under Python 2!
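If you do need to stay on Python 3, the oversample crash mentioned above is most likely the float division in Classifier.predict; a hedged one-line fix would be to use integer division when reshaping the crop predictions:
# inside Classifier.predict, replacing the reshape of the oversampled predictions
predictions = predictions.reshape((len(predictions) // 10, 10, -1))  # '//' keeps the dimension an int under Python 3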

Resources