3D tensors expect 2 values for padding Torch - pytorch

I have successfully trained and tested my model using this tutorial, and now I want to test it on single images. Here is my code:
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import os
class Config():
    training_dir = "./data/faces/training/"
    testing_dir = "./data/faces/testing/"
    train_batch_size = 80
    train_number_epochs = 100

class SiameseNetworkDataset(Dataset):
    ...

class SiameseNetwork(nn.Module):
    ...

class ContrastiveLoss(torch.nn.Module):
    ...

if __name__ == '__main__':
    net = SiameseNetwork().cuda()
    net.load_state_dict(torch.load("model.pt"))
    img0 = Image.open(os.path.join('data', 'faces', 'testing', 's5', '2.png'))
    img1 = Image.open(os.path.join('data', 'faces', 'testing', 's5', '1.png'))
    img0 = img0.convert("L")
    img1 = img1.convert("L")
    img0 = PIL.ImageOps.invert(img0)
    img1 = PIL.ImageOps.invert(img1)
    transform = transforms.Compose([transforms.Resize((100, 100)),
                                    transforms.ToTensor()])
    img0 = transform(img0)
    img1 = transform(img1)
    img0 = img0.cuda()
    img1 = img1.cuda()
    output1, output2 = net(Variable(img0).cuda(), Variable(img1).cuda())  # The error occurred here
    euclidean_distance = F.pairwise_distance(output1, output2)
    print(euclidean_distance.cpu().data.numpy())
And I get the following error: AssertionError: 3D tensors expect 2 values for padding. I can't understand what is wrong, because I use the same preprocessing that is used in SiameseNetworkDataset.
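For what it's worth, this assertion usually means a padding layer inside the network received a 3D (C, H, W) tensor where it expected a 4D batch: during training the DataLoader adds the batch dimension for you, but a single transformed image lacks it. A minimal sketch of the usual fix, adding the batch dimension by hand:

# hedged sketch: turn each 3D (C, H, W) image into a 4D (1, C, H, W) batch
img0 = transform(img0).unsqueeze(0).cuda()
img1 = transform(img1).unsqueeze(0).cuda()
output1, output2 = net(img0, img1)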

Related

Fastai for time series regression

So I have been using the fastai library for a couple of years now. Recently, I came upon tsai, the extension library dedicated to time series analysis.
I am trying to perform a simple regression task on the famous AirPassengers dataset.
I have no idea what I am doing wrong:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import torch
import random
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
# fastai
from fastai import *
from fastai.text import *
from fastai.text.all import *
from tsai.all import *
flight_data = sns.load_dataset("flights")
flight_data.head(20)
scaler = MinMaxScaler(feature_range=(-1, 1))
# flight_data['passengers'] = scaler.fit_transform(flight_data['passengers'].values.reshape(-1, 1)).flatten()
plt.figure(figsize=(10, 4))
plt.plot(flight_data['passengers'])
def create_inout_sequences(input_data, tw):
    inout_seq = []
    label_seq = []
    L = len(input_data)
    for i in range(L-tw):
        train_seq = input_data[i:i+tw]
        train_label = input_data[i+tw:i+tw+1]
        inout_seq.append(train_seq)
        label_seq.append(train_label)
    return np.array(inout_seq), np.array(label_seq)
data = flight_data['passengers'].values
x, y = create_inout_sequences(data, 15)
src = itemify(x, y)
yy = y.reshape(-1)
xx = x.reshape(-1)
tfms = [None, [TSRegression()]]
batch_tfms = TSStandardize(by_sample=True, by_var=True)
dls = get_ts_dls(x, yy, tfms=tfms, bs=64)
dls.show_batch()
dls.one_batch()
dls.c
learn = ts_learner(dls, InceptionTime, metrics=[mae, rmse], cbs=ShowGraph())
learn.lr_find()
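A likely culprit, as far as I can tell: tsai expects the input array shaped as [samples, variables, timesteps], while create_inout_sequences returns a 2D [samples, timesteps] array, and the batch_tfms defined above is never passed to get_ts_dls. A hedged sketch of those two fixes, reusing the names from the code above:

# sketch under the assumption that tsai wants 3D input:
# add a singleton "variables" axis and actually pass batch_tfms
x3d = x.reshape(-1, 1, 15)   # [samples, 1 variable, 15 timesteps]
tfms = [None, TSRegression()]
dls = get_ts_dls(x3d, yy, tfms=tfms, batch_tfms=batch_tfms, bs=64)
learn = ts_learner(dls, InceptionTime, metrics=[mae, rmse], cbs=ShowGraph())
learn.lr_find()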

Keras Model Training with Azure Machine Learning

I have trained a multiclass-classification model locally using Keras. I am attempting to migrate this so that it can be trained and run in Azure Machine Learning Studio (AML).
I have provided the sections of code below which are used in AML: the main AML code and the script that trains the model (EnsemblingModel.py). From the main AML code, the training script is submitted via the ScriptRunConfig assigned to src.
Please note that I have also uploaded the dataset which the model should be trained upon to AML directly and is titled 'test_data'.
However an error is returned when executing the line RunDetails(run).show() from the Main AML code section. The error is:
Error occurred: User program failed with FileNotFoundError: [Errno 2] No such file or directory: 'test_data'
This error message refers to the following line from the EnsemblingModel.py script:
dataframe = pd.read_csv("test_data", header=None)
I understand that the script is unable to load the data and I have therefore tried changing the code, for example:
dataframe = dataset.get_by_name(ws, name='test_data')
Which returned the following error:
Error occurred: User program failed with NameError: name 'dataset' is not defined
How do I change this so that the script is able to read and load the data and training can commence? Maybe I am going about this completely the wrong way, so any advice is welcome.
I have consulted the Microsoft documentation as well as the Azure guides on GitHub here, but there seem to be limited examples.
I am new to AML, so if anyone has resources for using it alongside Keras, that would also be appreciated.
Main AML Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import azureml
from azureml.core import Experiment
from azureml.core import Environment
from azureml.core import Dataset
from azureml.core import Workspace, Run
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
from azureml.core import Experiment
script_folder = './TestingModel1'
os.makedirs(script_folder, exist_ok=True)
exp = Experiment(workspace=ws, name='TestingModel1')
dataset = Dataset.get_by_name(ws, name='test_data')
dataframe = dataset.to_pandas_dataframe()
df = dataframe.values
cluster_name = "cpu-cluster"
try:
    compute_target = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing compute target')
except ComputeTargetException:
    print('Creating a new compute target...')
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, cluster_name, compute_config)
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
compute_targets = ws.compute_targets
for name, ct in compute_targets.items():
print(name, ct.type, ct.provisioning_state)
from azureml.core import Environment
keras_env = Environment.from_conda_specification(name = 'keras-2.3.1', file_path = './conda_dependencies.yml')
# Specify a GPU base image
#keras_env.docker.enabled = True
keras_env.docker.base_image = 'mcr.microsoft.com/azureml/openmpi3.1.2-cuda10.0-cudnn7-ubuntu18.04'
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory=script_folder,
                      script='EnsemblingModel.py',
                      compute_target=compute_target,
                      environment=keras_env)
run = exp.submit(src)
from azureml.widgets import RunDetails
RunDetails(run).show()
Ensembling Model Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#KerasLibraries
from keras import callbacks
from keras.layers.normalization import BatchNormalization
from keras.layers import Activation
from keras.layers import Dropout
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
#tensorFlow
import tensorflow as tf
#SKLearnLibraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from azureml.core import Run
# In[3]:
dataframe = pd.read_csv("test_data", header=None)
dataframe = dataset.get_by_name(ws, name='test_data')
dataset = dataframe.values
# In[4]:
X = dataset[:,0:22].astype(float)
y = dataset[:,22]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_y)
print(dummy_y.shape)
#print(X.shape)
#print(X)
import sys
np.set_printoptions(threshold=sys.maxsize)
dummy_y_new = dummy_y[0:42,:]
print(dummy_y_new)
#dataset
# In[5]:
earlystopping = callbacks.EarlyStopping(monitor="val_loss",
                                        mode="min", patience=125,
                                        restore_best_weights=True)
#define Keras
model1 = Sequential()
model1.add(Dense(50, input_dim=22))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(0.5,input_shape=(50,)))
model1.add(Dense(50))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(0.5,input_shape=(50,)))
model1.add(Dense(8, activation='softmax'))
#compile the keras model
model1.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# fit the keras model on the dataset
model1.fit(X, dummy_y, validation_split=0.25, epochs=10000, batch_size=100, verbose=1, callbacks=[earlystopping])
_, accuracy3 = model1.evaluate(X, dummy_y, verbose=0)
print('Accuracy: %.2f' % (accuracy3*100))
predict_dataset = tf.convert_to_tensor([
    [1,5,1,0.459,0.322,0.041,0.002,0.103,0.032,0.041,14,0.404,0.284,0.052,0.008,0.128,0.044,0.037,0.043,54,0,155],
])
predictions = model1(predict_dataset, training=False)
predictions2 = predictions.numpy()
print(predictions2)
print(type(predictions2))
I have resolved the above issue by adding an argument to the ScriptRunConfig code:
test_data_ds = Dataset.get_by_name(ws, name='test_data')
src = ScriptRunConfig(source_directory=script_folder,
                      script='EnsemblingModel.py',
                      # pass the dataset as an input with friendly name 'test_data'
                      arguments=['--input-data', test_data_ds.as_named_input('test_data')],
                      compute_target=compute_target,
                      environment=keras_env)
As well as the following to the modelling script itself:
import argparse
from azureml.core import Dataset, Run
parser = argparse.ArgumentParser()
parser.add_argument("--input-data", type=str)
args = parser.parse_args()
run = Run.get_context()
ws = run.experiment.workspace
# get the input dataset by ID
dataset = Dataset.get_by_id(ws, id=args.input_data)
# load the TabularDataset to pandas DataFrame
df = dataset.to_pandas_dataframe()
dataset = df.values
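A hedged aside: with as_named_input, the run context should also be able to resolve the dataset by its friendly name, which avoids parsing the ID yourself. A sketch, assuming the same as_named_input('test_data') registration shown above:

# alternative retrieval inside EnsemblingModel.py
from azureml.core import Run

run = Run.get_context()
dataset = run.input_datasets['test_data']  # resolved by friendly name
df = dataset.to_pandas_dataframe()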
For anyone curious, more information can be found here:

RandomAdjustSharpness gives IndexError: tuple index out of range

While using RandomAdjustSharpness, my code throws the following error: IndexError: tuple index out of range. I followed the instructions given here - https://pytorch.org/vision/stable/transforms.html - and am therefore confused by this error.
Here is my code -
import math, random
from sklearn.datasets import load_sample_images
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
def random_crop(imgs):
    imgs = torch.tensor(imgs)
    change = torch.nn.Sequential(
        transforms.RandomCrop(427),
        transforms.RandomAdjustSharpness(1, p=1)
    )
    imgs = change(imgs).numpy()
    return imgs
###Obtaining a random image and preprocessing it!##
dataset = load_sample_images()
first_img_data = dataset.images[0]
first_img_data = first_img_data.reshape(-1, 427, 640)
first_img_data = first_img_data[1, :, :]
#first_img_data = first_img_data[0:84, 0:84].reshape(-1, 84,84)
# first_img_data = torch.tensor(first_img_data)
plt.figure()
plt.imshow(np.squeeze(first_img_data))
foo = random_crop(first_img_data)
plt.figure()
plt.imshow(np.squeeze(foo))
plt.show()
You need to add a dimension to your tensor, like this:
torch.tensor([imgs])
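Expanded a little (a sketch, assuming the IndexError comes from the transform receiving a 2D (H, W) array): the torchvision transforms expect a channel dimension, so give them (1, H, W):

def random_crop(imgs):
    # (H, W) -> (1, H, W): the transforms expect a channel dimension
    imgs = torch.tensor(imgs).unsqueeze(0)
    change = torch.nn.Sequential(
        transforms.RandomCrop(427),
        transforms.RandomAdjustSharpness(1, p=1)
    )
    return change(imgs).numpy()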

pytorch customdataset notimplemented error

I'm making my own custom Dataset in PyTorch, and I would like to visualize an image from it.
However, I think something is going wrong in the custom Dataset.
Please help me.
NotImplementedError                       Traceback (most recent call last)
<ipython-input> in <module>()
      1 import matplotlib.pyplot as plt
      2 dat = TrainDataset(transforms.ToTensor())
----> 3 img, label = dat[i]
      4 plt.imshow(img.permute(1,2,0))

/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataset.py in __getitem__(self, index)
     31
     32     def __getitem__(self, index) -> T_co:
---> 33         raise NotImplementedError
     34
     35     def __add__(self, other: 'Dataset[T_co]') -> 'ConcatDataset[T_co]':

NotImplementedError:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision
from torch.utils.data import DataLoader
import os
import glob
from torch.utils.data import Dataset
import pandas as pd
from PIL import Image
class TrainDataset(Dataset):
    def __init__(self, transform):
        super().__init__()
        self.data = pd.read_csv('/content/drive/MyDrive/cancer/train_labels.csv')
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img_name, label = self.data.iloc[idx]
        img = Image.open(f'/content/drive/MyDrive/cancer/test/{image_name}.tif')
        img = self.transform(img)
        return (img, torch.tensor(label).long())
import matplotlib.pyplot as plt
dat= TrainDataset(transforms.ToTensor())
img,label= dat[1]
plt.imshow(img.permute(1,2,0))
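It is hard to be certain from the listing, but the traceback shows the base Dataset.__getitem__ running, which typically means the class Python actually executed did not define __getitem__ (for example, the methods were indented at module level in the notebook even if the pasted code looks right). Note also that __getitem__ unpacks img_name but the f-string uses image_name, which would raise a NameError once that is fixed. A hedged sketch:

class TrainDataset(Dataset):
    def __init__(self, transform):
        super().__init__()
        self.data = pd.read_csv('/content/drive/MyDrive/cancer/train_labels.csv')
        self.transform = transform

    # both methods must live inside the class body, otherwise the base
    # Dataset.__getitem__ (which raises NotImplementedError) is used
    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img_name, label = self.data.iloc[idx]
        # use the variable actually unpacked above (img_name, not image_name)
        img = Image.open(f'/content/drive/MyDrive/cancer/test/{img_name}.tif')
        img = self.transform(img)
        return img, torch.tensor(label).long()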

Making Predictions (classifying chess pieces)

I am trying to identify all the pieces present on the chessboard via machine learning. Currently I am predicting a single piece. I want to load the trained model from disk, loop through the board, get each playing-square crop, and have the model predict the piece that is on that square.
I want to do like this- https://www.youtube.com/watch?v=jcFvrCsoY_w
This is my current code for predicting a single piece. Help me loop through the board and get the playing-square crops like in the video above.
import cv2
import time
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
model = load_model('/home/tejas/Videos/chess/model_50x50.hd5')
label_map = list('KQRBNP_kqrbnp')
def predict(img, model, img_size=(50,50), plot=False):
    img = cv2.resize(img, img_size)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if plot:
        plt.imshow(img, cmap='gray')
    img = img.reshape(1, *img_size, 1) / 255
    pred = model.predict(img)
    return label_map[np.argmax(pred)]
path = '/media/tejas/creator/chess/train_data/black/r/r_90_1579252980.226565.jpg'
name_map = {
    'K': 'White King',
    'Q': 'White Queen',
    'R': 'White Rook',
    'B': 'White Bishop',
    'N': 'White Knight',
    'P': 'White Pawn',
    '_': 'Empty Square',
    'k': 'Black King',
    'q': 'Black Queen',
    'r': 'Black Rook',
    'b': 'Black Bishop',
    'n': 'Black Knight',
    'p': 'Black Pawn',
}
img = cv2.imread(path)
pred = predict(img, model, plot=True)
print('The image is a', name_map[pred])
Thanks !!!
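Not a full answer, but a rough sketch of the board loop, assuming a top-down image tightly cropped and aligned to the board (the path below is hypothetical), so each playing square is simply one eighth of the board on a side:

board = cv2.imread('/path/to/board.jpg')  # hypothetical aligned board image
board = cv2.resize(board, (400, 400))     # 8 squares of 50 px each
sq = 400 // 8
for row in range(8):
    for col in range(8):
        crop = board[row*sq:(row+1)*sq, col*sq:(col+1)*sq]
        piece = predict(crop, model)
        # files a-h left to right, ranks 8-1 top to bottom
        print(f'{chr(ord("a") + col)}{8 - row}: {name_map[piece]}')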
