AttributeError: 'list' object has no attribute 'dot' (Python 3)

# unit vector
def normalized(self):
    try:
        unit = [x / self.magnitude() for x in self.coordinates]
        return unit
    except ZeroDivisionError:
        raise Exception("Can not normalize the zero vector")

# dot product of two vectors
def dot(self, v):
    x = [x * y for x, y in zip(self.coordinates, v.coordinates)]
    return sum(x)

# angle between two vectors, in radians or degrees
def angle_with(self, v, degrees=False):
    try:
        u1 = self.normalized()
        u2 = v.normalized()
        print(u1, u2)
        angle_in_radians = math.acos(u1.dot(u2))
        if degrees:
            degrees_per_radian = 180. / math.pi
            return angle_in_radians * degrees_per_radian
        else:
            return angle_in_radians
How can I use the dot function here?

Try this:
import math

class Vector(object):
    def __init__(self, coordinates):
        self.coordinates = tuple(coordinates)
        self.dimension = len(coordinates)

    def magnitude(self):
        # needed by normalized(); same definition as in the second variant below
        return math.sqrt(sum(x ** 2 for x in self.coordinates))

    def normalized(self):
        try:
            a = 1.0 / self.magnitude()
            res = [a * x for x in self.coordinates]
            return res
        except ZeroDivisionError:
            raise Exception('your error message')

    def dot(self, v):
        return sum([x * y for x, y in zip(self.coordinates, v.coordinates)])

    def angle_with(self, v, in_degree=False):
        try:
            u1 = Vector(self.normalized())
            u2 = Vector(v.normalized())
            angle_in_radians = math.acos(u1.dot(u2))
            if in_degree:
                degrees_per_radian = 180. / math.pi
                return angle_in_radians * degrees_per_radian
            else:
                return angle_in_radians
        except Exception:
            raise Exception('Cannot compute an angle with the zero vector')
OR
# same Vector class and __init__ method as above
def magnitude(self):
    squared_coordinates = [x ** 2 for x in self.coordinates]
    return math.sqrt(sum(squared_coordinates))

def dot(self, v):
    return sum([x * y for x, y in zip(self.coordinates, v.coordinates)])

def angle_with(self, v, in_degree=False):
    try:
        dot = self.dot(v)
        magnitude_v1 = self.magnitude()
        magnitude_v2 = v.magnitude()
        res = dot / (magnitude_v1 * magnitude_v2)
        angle_in_radians = math.acos(res)
        if in_degree:
            degrees_per_radian = 180. / math.pi
            return angle_in_radians * degrees_per_radian
        else:
            return angle_in_radians
    except ZeroDivisionError:
        raise Exception('Cannot compute an angle with the zero vector')
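With either variant the usage is the same (a quick check, assuming the Vector class above):
v1 = Vector([1, 0])
v2 = Vector([1, 1])
print(v1.dot(v2))               # 1
print(v1.angle_with(v2, True))  # ~45.0 (degrees)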

Related

Why does the error say the class has no attribute in my case?

I have the following code. When I run the fit function I get the error: 'P' object has no attribute 'iterations'. I don't really understand why it's happening; I assign it in the __init__ method.
import numpy as np
from typing import NoReturn

class P:
    def __init__(self, iterations: int = 100):
        self.w = None
        self.iterations = iterations
        self.classes_map = None

    def fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        X = np.hstack((np.ones((X.shape[0], 1)), X))
        self.w = np.zeros(X.shape[1])
        classes = np.unique(y)
        self.classes_map = {-1: classes[0], 1: classes[1]}
        y_ = np.where(y == classes[0], -1, 1)
        for _ in range(self.iterations):
            predictions = np.sign(X.dot(self.w))
            incorrect_indices = predictions != y_
            if np.any(incorrect_indices):
                self.w += np.dot(y_[incorrect_indices], X[incorrect_indices])

    def predict(self, X: np.ndarray) -> np.ndarray:
        X = np.hstack((np.ones((X.shape[0], 1)), X))
        predictions = np.sign(X.dot(self.w))
        predictions[predictions == -1] = self.classes_map[-1]
        predictions[predictions == 1] = self.classes_map[1]
        return predictions
So I try to call it:
from sklearn.datasets import make_blobs

X, true_labels = make_blobs(400, 2, centers=[[0, 0], [2.5, 2.5]])
c = P()
c.fit(X, true_labels)  # AttributeError: 'P' object has no attribute 'iterations'
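As posted, __init__ does set self.iterations, so a quick sanity check (a sketch, not from the original post) is to inspect the instance right after construction; if iterations is missing there, the interpreter is most likely still running an older, stale definition of P (for example from an earlier notebook cell):
c = P()
print(vars(c))      # expected: {'w': None, 'iterations': 100, 'classes_map': None}
print(P.__init__)   # shows which definition of P is actually loaded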

bilinear interpolation for angles

I have a 2D array of directional data. I need to interpolate it onto a higher-resolution grid, but the ready-made functions such as scipy's interp2d don't account for the discontinuity between 0 and 360 degrees.
I have code that does this for a single cell of 4 points (thanks to "How to perform bilinear interpolation in Python" and "Rotation Interpolation"), but I would like it to accept big data sets at once, just like interp2d. How can I incorporate this into the code below in a way that doesn't just loop over all of the data?
Thanks!
def shortest_angle(beg, end, amount):
    shortest_angle = ((((end - beg) % 360) + 540) % 360) - 180
    return shortest_angle * amount

def bilinear_interpolation_rotation(x, y, points):
    '''Interpolate (x,y) from values associated with four points.

    The four points are a list of four triplets: (x, y, value).
    The four points can be in any order. They should form a rectangle.
    '''
    points = sorted(points)  # order points by x, then by y
    (x1, y1, q11), (_x1, y2, q12), (x2, _y1, q21), (_x2, _y2, q22) = points
    if x1 != _x1 or x2 != _x2 or y1 != _y1 or y2 != _y2:
        raise ValueError('points do not form a rectangle')
    if not x1 <= x <= x2 or not y1 <= y <= y2:
        raise ValueError('(x, y) not within the rectangle')
    # interpolate over the x value at each y point
    fxy1 = q11 + shortest_angle(q11, q21, (x - x1) / (x2 - x1))
    fxy2 = q12 + shortest_angle(q12, q22, (x - x1) / (x2 - x1))
    # interpolate over the y values
    fxy = fxy1 + shortest_angle(fxy1, fxy2, (y - y1) / (y2 - y1))
    return fxy
I'm going to reuse simplified versions of my personal Point and Point3D classes for this example:
Point
class Point:
    # Constructors
    def __init__(self, x, y):
        self.x = x
        self.y = y

    # Properties
    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = float(value)

    @property
    def y(self):
        return self._y

    @y.setter
    def y(self, value):
        self._y = float(value)

    # Printing magic methods
    def __repr__(self):
        return "({p.x},{p.y})".format(p=self)

    # Comparison magic methods
    def __is_compatible(self, other):
        return hasattr(other, 'x') and hasattr(other, 'y')

    def __eq__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x == other.x) and (self.y == other.y)

    def __ne__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x != other.x) or (self.y != other.y)

    def __lt__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y) < (other.x, other.y)

    def __le__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y) <= (other.x, other.y)

    def __gt__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y) > (other.x, other.y)

    def __ge__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y) >= (other.x, other.y)
It represents a 2D point. It has a simple constructor, x and y properties that ensure floats are always stored, magic methods for string representation as (x,y), and comparison magic methods that make instances sortable (by x, then by y). My original class has additional features such as addition and subtraction (vector behaviour) magic methods, but they are not needed for this example.
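For example (a quick check of the class above):
p = Point(1, 2)
print(p)                                    # (1.0,2.0) - values are stored as floats
print(sorted([Point(2, 0), Point(1, 5)]))   # [(1.0,5.0), (2.0,0.0)] - sorted by x, then y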
Point3D
class Point3D(Point):
    # Constructors
    def __init__(self, x, y, z):
        super().__init__(x, y)
        self.z = z

    @classmethod
    def from2D(cls, p, z):
        return cls(p.x, p.y, z)

    # Properties
    @property
    def z(self):
        return self._z

    @z.setter
    def z(self, value):
        self._z = (value + 180.0) % 360 - 180

    # Printing magic methods
    def __repr__(self):
        return "({p.x},{p.y},{p.z})".format(p=self)

    # Comparison magic methods
    def __is_compatible(self, other):
        return hasattr(other, 'x') and hasattr(other, 'y') and hasattr(other, 'z')

    def __eq__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)

    def __ne__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x != other.x) or (self.y != other.y) or (self.z != other.z)

    def __lt__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y, self.z) < (other.x, other.y, other.z)

    def __le__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y, self.z) <= (other.x, other.y, other.z)

    def __gt__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y, self.z) > (other.x, other.y, other.z)

    def __ge__(self, other):
        if not self.__is_compatible(other):
            return NotImplemented
        return (self.x, self.y, self.z) >= (other.x, other.y, other.z)
Same as Point but for 3D points. It also includes an additional constructor classmethod that takes a Point and its z value as arguments.
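For example, the z setter keeps stored angles in the [-180, 180) range (a quick check of the class above):
p = Point3D.from2D(Point(1, 2), 350)
print(p)   # (1.0,2.0,-10.0) - 350 degrees is stored as -10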
Linear interpolation
def linear_interpolation(x, *points, extrapolate=False):
    # Check there are a minimum of two points
    if len(points) < 2:
        raise ValueError("Not enough points given for interpolation.")
    # Sort the points
    points = sorted(points)
    # Check that x is in the valid interpolation interval
    if not extrapolate and (x < points[0].x or x > points[-1].x):
        raise ValueError("{} is not in the interpolation interval.".format(x))
    # Determine which are the two surrounding interpolation points
    if x < points[0].x:
        i = 0
    elif x > points[-1].x:
        i = len(points) - 2
    else:
        i = 0
        while points[i + 1].x < x:
            i += 1
    p1, p2 = points[i:i + 2]
    # Interpolate
    return Point(x, p1.y + (p2.y - p1.y) * (x - p1.x) / (p2.x - p1.x))
It takes a first positional argument, the x whose y value we want to calculate, and any number of Point instances to interpolate from. A keyword argument (extrapolate) allows extrapolation to be turned on. A Point instance is returned with the requested x and the calculated y values.
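For example (a quick check, using the Point class above):
print(linear_interpolation(2, Point(1, 0), Point(3, 10)))   # (2.0,5.0)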
Bilinear interpolation
I offer two alternatives; both have a signature similar to the previous interpolation function: a Point whose z value we want to calculate, a keyword argument (extrapolate) that turns on extrapolation, and a returned Point3D instance with the requested and calculated data. The difference between the two approaches is how the values used for interpolation are provided:
Approach 1
The first approach takes a two-level nested dict: the first-level keys are the x values, the second-level keys are the y values, and the second-level values are the z values.
def bilinear_interpolation(p, points, extrapolate=False):
    x_values = sorted(points.keys())
    # Check there are a minimum of two x values
    if len(x_values) < 2:
        raise ValueError("Not enough points given for interpolation.")
    y_values = set()
    for value in points.values():
        y_values.update(value.keys())
    y_values = sorted(y_values)
    # Check there are a minimum of two y values
    if len(y_values) < 2:
        raise ValueError("Not enough points given for interpolation.")
    # Check that p is in the valid interval
    if not extrapolate and (p.x < x_values[0] or p.x > x_values[-1] or p.y < y_values[0] or p.y > y_values[-1]):
        raise ValueError("{} is not in the interpolation interval.".format(p))
    # Determine which are the four surrounding interpolation points
    if p.x < x_values[0]:
        i = 0
    elif p.x > x_values[-1]:
        i = len(x_values) - 2
    else:
        i = 0
        while x_values[i + 1] < p.x:
            i += 1
    if p.y < y_values[0]:
        j = 0
    elif p.y > y_values[-1]:
        j = len(y_values) - 2
    else:
        j = 0
        while y_values[j + 1] < p.y:
            j += 1
    surroundings = [
        Point(x_values[i    ], y_values[j    ]),
        Point(x_values[i    ], y_values[j + 1]),
        Point(x_values[i + 1], y_values[j    ]),
        Point(x_values[i + 1], y_values[j + 1]),
    ]
    for i, surrounding in enumerate(surroundings):
        try:
            surroundings[i] = Point3D.from2D(surrounding, points[surrounding.x][surrounding.y])
        except KeyError:
            raise ValueError("{} is missing in the interpolation grid.".format(surrounding))
    p1, p2, p3, p4 = surroundings
    # Interpolate
    p12 = Point3D(p1.x, p.y, linear_interpolation(p.y, Point(p1.y, p1.z), Point(p2.y, p2.z), extrapolate=True).y)
    p34 = Point3D(p3.x, p.y, linear_interpolation(p.y, Point(p3.y, p3.z), Point(p4.y, p4.z), extrapolate=True).y)
    return Point3D(p.x, p12.y, linear_interpolation(p.x, Point(p12.x, p12.z), Point(p34.x, p34.z), extrapolate=True).y)

print(bilinear_interpolation(Point(2, 3), {1: {2: 5, 4: 6}, 3: {2: 3, 4: 9}}))
print(bilinear_interpolation(Point(2,3), {1: {2: 5, 4: 6}, 3: {2: 3, 4: 9}}))
Approach 2
The second approach takes any number of Point3D instances.
def bilinear_interpolation(p, *points, extrapolate=False):
    # Check there are a minimum of four points
    if len(points) < 4:
        raise ValueError("Not enough points given for interpolation.")
    # Sort the points into a grid
    x_values = set()
    y_values = set()
    for point in sorted(points):
        x_values.add(point.x)
        y_values.add(point.y)
    x_values = sorted(x_values)
    y_values = sorted(y_values)
    # Check that p is in the valid interval
    if not extrapolate and (p.x < x_values[0] or p.x > x_values[-1] or p.y < y_values[0] or p.y > y_values[-1]):
        raise ValueError("{} is not in the interpolation interval.".format(p))
    # Determine which are the four surrounding interpolation points
    if p.x < x_values[0]:
        i = 0
    elif p.x > x_values[-1]:
        i = len(x_values) - 2
    else:
        i = 0
        while x_values[i + 1] < p.x:
            i += 1
    if p.y < y_values[0]:
        j = 0
    elif p.y > y_values[-1]:
        j = len(y_values) - 2
    else:
        j = 0
        while y_values[j + 1] < p.y:
            j += 1
    surroundings = [
        Point(x_values[i    ], y_values[j    ]),
        Point(x_values[i    ], y_values[j + 1]),
        Point(x_values[i + 1], y_values[j    ]),
        Point(x_values[i + 1], y_values[j + 1]),
    ]
    for point in points:
        for i, surrounding in enumerate(surroundings):
            if point.x == surrounding.x and point.y == surrounding.y:
                surroundings[i] = point
    for surrounding in surroundings:
        if not isinstance(surrounding, Point3D):
            raise ValueError("{} is missing in the interpolation grid.".format(surrounding))
    p1, p2, p3, p4 = surroundings
    # Interpolate
    p12 = Point3D(p1.x, p.y, linear_interpolation(p.y, Point(p1.y, p1.z), Point(p2.y, p2.z), extrapolate=True).y)
    p34 = Point3D(p3.x, p.y, linear_interpolation(p.y, Point(p3.y, p3.z), Point(p4.y, p4.z), extrapolate=True).y)
    return Point3D(p.x, p12.y, linear_interpolation(p.x, Point(p12.x, p12.z), Point(p34.x, p34.z), extrapolate=True).y)

print(bilinear_interpolation(Point(2, 3), Point3D(3, 2, 3), Point3D(1, 4, 6), Point3D(3, 4, 9), Point3D(1, 2, 5)))
print(bilinear_interpolation(Point(2,3), Point3D(3,2,3), Point3D(1,4,6), Point3D(3,4,9), Point3D(1,2,5)))
Both approaches use the previously defined linear_interpolation function, and they always call it with extrapolate=True, since an exception has already been raised if extrapolation was off and the requested point was outside the provided interval.
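If whole grids need to be processed at once, another option is to interpolate the sine and cosine components with a standard vectorized routine and recombine them with arctan2, which sidesteps the 0/360 discontinuity without an explicit Python loop. This is only a sketch, assuming SciPy's RegularGridInterpolator and a regular source grid (the function name is mine); note that component-wise interpolation is not exactly the same as shortest-angle interpolation, although the results are usually close for smooth fields:
import numpy as np
from scipy.interpolate import RegularGridInterpolator

def interpolate_directions(x, y, angles_deg, xi, yi):
    """Bilinearly interpolate a grid of directions (degrees) onto new points."""
    rad = np.deg2rad(angles_deg)                     # angles_deg has shape (len(x), len(y))
    # Interpolate the components, not the raw angles, so 359 and 1 average near 0.
    f_sin = RegularGridInterpolator((x, y), np.sin(rad))
    f_cos = RegularGridInterpolator((x, y), np.cos(rad))
    pts = np.column_stack([np.ravel(xi), np.ravel(yi)])
    out = np.arctan2(f_sin(pts), f_cos(pts))
    return np.rad2deg(out).reshape(np.shape(xi)) % 360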

AttributeError: 'float' object has no attribute 'get_coords'

I'm learning Python from this lecture: Lec 19 | MIT 6.00 Introduction to Computer Science and Programming. I'm using Python 3.6.2; the lecture example runs on Python 2.x. What's the proper way to set the values of x and y in the function ans_quest?
x, y = loc_list[-1].get_coords()
Can this method be called like this? This was the example in the lecture.
Full code:
import math, random, pylab, copy
class Location(object):
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def move(self, xc, yc):
return Location(self.x+float(xc), self.y+float(yc))
def get_coords(self):
return self.x, self.y
def get_dist(self, other):
ox, oy = other.get_coords()
x_dist = self.x - ox
y_dist = self.y - oy
return math.sqrt(x_dist**2 + y_dist**2)
class Compass_Pt(object):
possibles = ('N', 'S', 'E', 'W')
def __init__(self, pt):
if pt in self.possibles: self.pt = pt
else: raise ValueError('in Compass_Pt.__init__')
def move(self, dist):
if self.pt == 'N': return (0, dist)
elif self.pt == 'S': return (0, -dist)
elif self.pt == 'E': return (dist, 0)
elif self.pt == 'W': return (-dist, 0)
else: raise ValueError('in Compass_Pt.move')
class Field(object):
''' Cartesian plane where object will be located '''
def __init__(self, drunk, loc):
self.drunk = drunk
self.loc = loc
def move(self, cp, dist):
old_loc = self.loc
xc, yc = cp.move(dist)
self.loc = old_loc.move(xc, yc)
def get_loc(self):
return self.loc
def get_drunk(self):
return self.drunk
class Drunk(object):
''' Point itself '''
def __init__(self, name):
self.name = name
def move(self, field, cp, dist = 1):
if field.get_drunk().name != self.name:
raise ValueError('Drunk.move called with drunk not in the field')
for i in range(dist):
field.move(cp, 1)
class Usual_Drunk(Drunk):
def move(self, field, dist = 1):
''' Drunk.move superclass method override. Sends additional cp attribute.'''
cp = random.choice(Compass_Pt.possibles)
Drunk.move(self, field, Compass_Pt(cp), dist)
class Cold_Drunk(Drunk):
def move(self, field, dist = 1):
cp = random.choice(Compass_Pt.possibles)
if cp == 'S':
Drunk.move(self, field, Compass_Pt(cp), 2*dist)
else:
Drunk.move(self, field, Compass_Pt(cp), dist)
class EW_Drunk(Drunk):
def move(self, field, time = 1):
cp = random.choice(Compass_Pt.possibles)
while cp != 'E' and cp != 'W':
cp = random.choice(Compass_Pt.possibles)
Drunk.move(self, field, Compass_Pt(cp), time)
def perform_trial(time, f):
start = f.get_loc()
distances = [0,0]
for t in range(1, time + 1):
f.get_drunk().move(f)
new_loc = f.get_loc()
distance = new_loc.get_dist(start)
distances.append(distance)
return distances
def perform_sim(time, num_trials, drunk_type):
dist_lists = []
loc_lists = []
for trial in range(num_trials):
d = drunk_type('Drunk' + str(trial))
f = Field(d, Location(0, 0))
distances = perform_trial(time, f)
locs = copy.deepcopy(distances)
dist_lists.append(distances)
loc_lists.append(locs)
return dist_lists, loc_lists
def ans_quest(max_time, num_trials, drunk_type, title):
dist_lists, loc_lists = perform_sim(max_time, num_trials, drunk_type)
means = []
for t in range(max_time + 1):
tot = 0.0
for dist_l in dist_lists:
tot += dist_l[t]
means.append(tot/len(dist_lists))
pylab.figure()
pylab.plot(means)
pylab.ylabel('distance')
pylab.xlabel('time')
pylab.title('{} Ave. Distance'.format(title))
lastX = []
lastY = []
for loc_list in loc_lists:
x, y = loc_list[-1].get_coords()
lastX.append(x)
lastY.append(y)
pylab.figure()
pylab.scatter(lastX, lastY)
pylab.ylabel('NW Distance')
pylab.title('{} Final location'.format(title))
pylab.figure()
pylab.hist(lastX)
pylab.xlabel('EW Value')
pylab.ylabel('Number of Trials')
pylab.title('{} Distribution of Final EW Values'.format(title))
num_steps = 50
num_trials = 10
ans_quest(num_steps, num_trials, Usual_Drunk, 'Usual Drunk ' + str(num_trials) + ' Trials')
ans_quest(num_steps, num_trials, Cold_Drunk, 'Cold Drunk ' + str(num_trials) + ' Trials')
ans_quest(num_steps, num_trials, EW_Drunk, 'EW Drunk ' + str(num_trials) + ' Trials')
pylab.show()
Error:
Traceback (most recent call last):
File "/home/tihe/Documents/CODING/Project Home/Python/biased_random_walks.py", line 194, in <module>
ans_quest(num_steps, num_trials, Usual_Drunk, 'Usual Drunk ' + str(num_trials) + ' Trials')
File "/home/tihe/Documents/CODING/Project Home/Python/biased_random_walks.py", line 175, in ans_quest
x, y = loc_list[-1].get_coords()
AttributeError: 'float' object has no attribute 'get_coords'
This method could be called like this if you had a list of Location objects. The error occurs because loc_list is populated with distances, not Location objects. That happens in perform_sim, where instead of getting the locations you make a deep copy of the distances.
Perhaps you could try something like this:
def perform_trial(time, f):
    start = f.get_loc()
    distances = [0, 0]
    locations = []
    for t in range(1, time + 1):
        f.get_drunk().move(f)
        new_loc = f.get_loc()
        locations.append(new_loc)
        distance = new_loc.get_dist(start)
        distances.append(distance)
    return distances, locations

def perform_sim(time, num_trials, drunk_type):
    dist_lists = []
    loc_lists = []
    for trial in range(num_trials):
        d = drunk_type('Drunk' + str(trial))
        f = Field(d, Location(0, 0))
        distances, locations = perform_trial(time, f)
        dist_lists.append(distances)
        loc_lists.append(locations)
    return dist_lists, loc_lists
I hope that helped you out.

TypeError: __init__() takes from 1 to 4 positional arguments but 9 were given

When I run the following program I get this error:
originDataset = dataset.lmdbDataset(originPath, 'abc', *args)
TypeError: __init__() takes from 1 to 4 positional arguments but 9 were given
This error relates to the second source file I present below. It's strange because I don't pass 9 arguments. What's wrong with my code?
import sys
origin_path = sys.path
sys.path.append("..")
import dataset
sys.path = origin_path
import lmdb
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.iteritems():
txn.put(k, v)
def convert(originPath, outputPath):
args = [0] * 6
originDataset = dataset.lmdbDataset(originPath, 'abc', *args)
print('Origin dataset has %d samples' % len(originDataset))
labelStrList = []
for i in range(len(originDataset)):
label = originDataset.getLabel(i + 1)
labelStrList.append(label)
if i % 10000 == 0:
print(i)
lengthList = [len(s) for s in labelStrList]
items = zip(lengthList, range(len(labelStrList)))
items.sort(key=lambda item: item[0])
env = lmdb.open(outputPath, map_size=1099511627776)
cnt = 1
cache = {}
nSamples = len(items)
for i in range(nSamples):
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
origin_i = items[i][1]
img, label = originDataset[origin_i + 1]
cache[labelKey] = label
cache[imageKey] = img
if cnt % 1000 == 0 or cnt == nSamples:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Convert dataset with %d samples' % nSamples)
if __name__ == "__main__":
convert('/share/datasets/scene_text/Synth90k/synth90k-val-lmdb', '/share/datasets/scene_text/Synth90k/synth90k-val-ordered-lmdb')
convert('/share/datasets/scene_text/Synth90k/synth90k-train-lmdb', '/share/datasets/scene_text/Synth90k/synth90k-train-ordered-lmdb')
which calls the following program :
#!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np
class lmdbDataset(Dataset):
def __init__(self, root=None, transform=None, target_transform=None):
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot creat lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'))
self.nSamples = nSamples
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
with self.env.begin(write=False) as txn:
img_key = 'image-%09d' % index
imgbuf = txn.get(img_key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print('Corrupted image for %d' % index)
return self[index + 1]
if self.transform is not None:
img = self.transform(img)
label_key = 'label-%09d' % index
label = str(txn.get(label_key))
if self.target_transform is not None:
label = self.target_transform(label)
return (img, label)
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
class randomSequentialSampler(sampler.Sampler):
def __init__(self, data_source, batch_size):
self.num_samples = len(data_source)
self.batch_size = batch_size
def __iter__(self):
n_batch = len(self) // self.batch_size
tail = len(self) % self.batch_size
index = torch.LongTensor(len(self)).fill_(0)
for i in range(n_batch):
random_start = random.randint(0, len(self) - self.batch_size)
batch_index = random_start + torch.range(0, self.batch_size - 1)
index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
# deal with tail
if tail:
random_start = random.randint(0, len(self) - self.batch_size)
tail_index = random_start + torch.range(0, tail - 1)
index[(i + 1) * self.batch_size:] = tail_index
return iter(index)
def __len__(self):
return self.num_samples
class alignCollate(object):
def __init__(self, imgH=32, imgW=128, keep_ratio=False, min_ratio=1):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio = keep_ratio
self.min_ratio = min_ratio
def __call__(self, batch):
images, labels = zip(*batch)
imgH = self.imgH
imgW = self.imgW
if self.keep_ratio:
ratios = []
for image in images:
w, h = image.size
ratios.append(w / float(h))
ratios.sort()
max_ratio = ratios[-1]
imgW = int(np.floor(max_ratio * imgH))
imgW = max(imgH * self.min_ratio, imgW) # assure imgH >= imgW
transform = resizeNormalize((imgW, imgH))
images = [transform(image) for image in images]
images = torch.cat([t.unsqueeze(0) for t in images], 0)
return images, labels
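For reference, the traceback counts self among the nine positional arguments: originPath, 'abc' and the six unpacked zeros make eight, plus self. The constructor above only accepts root, transform and target_transform, so the call has to match that signature. A minimal sketch (the resizeNormalize size is only an illustrative value):
# lmdbDataset.__init__(self, root=None, transform=None, target_transform=None)
originDataset = dataset.lmdbDataset(root=originPath)
# or, if an image transform is wanted:
originDataset = dataset.lmdbDataset(originPath, dataset.resizeNormalize((100, 32)))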

How to print a TensorVariable

I am a beginner with Theano and am studying it now. I'd like to print the value and shape of a TensorVariable while a theano.function is running. When I used Python's print function, it ran before the Theano function was compiled, so I learned that plain print is useless here. Therefore I tried another approach and added the following code to use theano.printing.Print.
(cce is the return value of theano.scan, so maybe it is not a symbolic variable.
Actually, I am confused by the concepts of TensorVariable and shared variable. Is a TensorVariable a sort of shared variable?)
x = theano.tensor.tensor3()                            # define data type
t_print = theano.printing.Print("cce value is : ")(x)
f = theano.function([x], t_print)                      # define theano.function
f(cce)                                                 # call f (print value of cce)
Then the following error occurred:
TypeError: ('Bad input argument to theano function with name "seq2seq.py : 98" at index 0(0-based)', 'Expected an array-like object, but found a Variable: maybe you are trying to call a function on a (possibly shared) variable instead of a numeric array?')
Could you let me know how to correct this code so it prints the value of cce (a TensorVariable)? Or is it impossible to print the value of a TensorVariable while theano.function is in progress?
Thank you for reading my question.
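For reference, the usual pattern from the Theano documentation (a sketch, not taken from the post) is to wrap the symbolic variable with theano.printing.Print inside the graph, rather than passing a symbolic variable to the compiled function as if it were data; the same wrapping can be applied to cce before taking its mean:
import numpy as np
import theano
import theano.tensor as T

x = T.tensor3()
# Wrap the variable inside the graph; its value is printed every time
# the compiled function evaluates this node.
x_printed = theano.printing.Print("x value is:")(x)
f = theano.function([x], x_printed.sum())
f(np.ones((2, 2, 2), dtype=theano.config.floatX))   # prints the array, then returns 8.0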
ADDED -
Here is my source code, as a big picture. theano.function() starts in the last line. loss_func is the categorical_crossentropy function. The last 4 lines are about the theano function.
def categorical_crossentropy(y_true, y_pred):
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
y_pred = y_pred.reshape( (-1, voca_dim_g) )
y_true = y_true.reshape( (-1, voca_dim_g) )
cce, updates = theano.scan(
fn=T.nnet.categorical_crossentropy,
sequences=[y_pred,y_true]
)
##### I want to print cce HERE #######
return T.mean(cce)
@staticmethod
def step(
x_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg
):
"""
x_t.shape = (timestep=1, dim)
x_t.shape = (n_samples, timestep=1, dim)
"""
i_t = T.nnet.sigmoid(T.dot(x_t, Ui) + T.dot(h_tm1, Wi) + bi)
f_t = T.nnet.sigmoid(T.dot(x_t, Uf) + T.dot(h_tm1, Wf) + bf)
o_t = T.nnet.sigmoid(T.dot(x_t, Uo) + T.dot(h_tm1, Wo) + bo)
g_t = T.tanh(T.dot(x_t, Ug) + T.dot(h_tm1, Wg) + bg)
c_t = c_tm1 * f_t + g_t * i_t
h_t = T.tanh(c_t) * o_t
return h_t, c_t
#########################################################################################################################
def forward(self, X):
states, updates = theano.scan(
fn=self.step,
sequences=[ X ],
outputs_info=[self.h_tm1, self.c_tm1],
non_sequences=[
self.Ui, self.Wi, self.bi,
self.Uf, self.Wf, self.bf,
self.Uo, self.Wo, self.bo,
self.Ug, self.Wg, self.bg
]
)
updates = [(self.h_tm1, states[0][-1]), (self.c_tm1, states[1][-1])]
return states, updates
#########################################################################################################################
def encode(self, X):
states, updates = self.forward(X)
h_t = states[0][-1]
c_t = states[1][-1]
return h_t, c_t, updates
def decode_step(
self, y_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg,
Wh, bh
):
h_t, c_t = self.step(
y_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg
)
y_t = T.dot(h_t, Wh) + bh
return y_t, h_t, c_t
def decode(self, h_tm1, c_tm1, timesteps):
outputs, updates = theano.scan(
fn=self.decode_step,
outputs_info=[self.y_t, h_tm1, c_tm1],
non_sequences=[
self.Ui, self.Wi, self.bi,
self.Uf, self.Wf, self.bf,
self.Uo, self.Wo, self.bo,
self.Ug, self.Wg, self.bg,
self.Wh, self.bh
],
n_steps=timesteps
)
updates = [
(self.h_tm1, outputs[1][-1]),
(self.c_tm1, outputs[2][-1])
]
return outputs[0], updates
h_tm1, c_tm1, updates_encode = encode(seq_input)
seq_predict, updates_decode = decode(h_tm1, c_tm1, T.shape(seq_target)[0])
loss = loss_func(seq_predict, seq_target)
self._train = theano.function([seq_input, seq_target], loss, updates = updates)
Below is the full source code:
# -*- coding: utf-8 -*-
__modifier__ = "Lee Guk Beom, Lee Jae Sang, Jang Jae Kwang (alphabetical Order)"
import readFile
import numpy as np
import theano
import theano.tensor as T
from six.moves import zip
from theano.compile.debugmode import DebugMode
import nltk
import sys
import os
from nltk.tokenize import sent_tokenize
import codecs
#theano.config.optimizer='fast_compile'
#theano.config.exception_verbosity='high'
#theano.config.compute_test_value = 'warn'
epsilon = 1e-6
dtype = theano.config.floatX
minibatch_size_g = 0
longest_seq_g = 0
voca_dim_g = 0
n_time_step_input_g = 0
n_timestep_target_g = 0
word_to_index_input_g = dict()
word_to_index_targrt_g = dict()
index_to_word_target_g = dict()
#########################################################################################################################
def shared(value, name=None):
return theano.shared(value.astype(dtype), name=name)
#########################################################################################################################
def shared_zeros(shape, name=None):
return shared(value=np.zeros(shape), name=name)
#########################################################################################################################
def shared_zeros_like(x, name=None):
return shared_zeros(shape=x.shape, name=name)
#########################################################################################################################
def init_weights(shape, name=None):
bound = np.sqrt(1.0/shape[1])
w = np.random.uniform(-bound, bound, shape)
return shared(value=w, name=name)
#########################################################################################################################
def adadelta(params, cost, lr=1.0, rho=0.95):
# from https://github.com/fchollet/keras/blob/master/keras/optimizers.py
cost = cost.astype('float32')
grads = T.grad(cost, params)
accus = [shared_zeros_like(p.get_value()) for p in params]
delta_accus = [shared_zeros_like(p.get_value()) for p in params]
updates = []
for p, g, a, d_a in zip(params, grads, accus, delta_accus):
new_a = rho * a + (1.0 - rho) * T.square(g)
updates.append((a, new_a))
update = g * T.sqrt(d_a + epsilon) / T.sqrt(new_a + epsilon)
new_p = p - lr * update
updates.append((p, new_p))
new_d_a = rho * d_a + (1.0 - rho) * T.square(update)
updates.append((d_a, new_d_a))
return updates
#########################################################################################################################
def categorical_crossentropy(y_true, y_pred):
# from https://github.com/fchollet/keras/blob/master/keras/objectives.py
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
# y_true = y_true.reshape( (-1, minibatch_size_g, voca_dim_g) )
'''
cce = T.nnet.categorical_crossentropy(y_pred,y_true)
# only matrix can be calculated
'''
# Y_PRED SOFTMAX
y_pred = y_pred.reshape( (-1, voca_dim_g) )
# y_pred_flat = T.nnet.softmax(y_pred)
y_true = y_true.reshape( (-1, voca_dim_g) )
cce, updates = theano.scan(
fn=T.nnet.categorical_crossentropy,
sequences=[y_pred,y_true]
)
return T.mean(cce)
#########################################################################################################################
def mean_square_error(y_true, y_pred):
return T.mean(T.square(y_pred - y_true))
#########################################################################################################################
class LSTM(object):
def __init__(self, size, dim):
self.size = size
self.dim = dim
shape_b = (minibatch_size_g, size)
shape_U = (dim, size)
shape_W = (size, size)
self.h_tm1 = shared_zeros(shape_b, "h_tm1")
self.c_tm1 = shared_zeros(shape_b, "c_tm1")
self.Ui = init_weights(shape_U, "Ui")
self.Wi = init_weights(shape_W, "Wi")
self.bi = shared_zeros(shape_b, "bi")
self.Uf = init_weights(shape_U, "Uf")
self.Wf = init_weights(shape_W, "Wf")
self.bf = shared_zeros(shape_b, "bf")
self.Uo = init_weights(shape_U, "Uo")
self.Wo = init_weights(shape_W, "Wo")
self.bo = shared_zeros(shape_b, "bo")
self.Ug = init_weights(shape_U, "Ug")
self.Wg = init_weights(shape_W, "Wg")
self.bg = shared_zeros(shape_b, "bg")
self.params = [
self.Ui, self.Wi, self.bi,
self.Uf, self.Wf, self.bf,
self.Uo, self.Wo, self.bo,
self.Ug, self.Wg, self.bg
]
def set_state(self, h, c):
self.h_tm1.set_value(h.get_value())
self.c_tm1.set_value(c.get_value())
def reset_state(self):
self.h_tm1 = shared_zeros((1, self.size), "h_tm1")
self.c_tm1 = shared_zeros((1, self.size), "c_tm1")
#########################################################################################################################
@staticmethod
def step(
x_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg
):
"""
x_t.shape = (timestep=1, dim)
x_t.shape = (n_samples, timestep=1, dim)
"""
i_t = T.nnet.sigmoid(T.dot(x_t, Ui) + T.dot(h_tm1, Wi) + bi)
f_t = T.nnet.sigmoid(T.dot(x_t, Uf) + T.dot(h_tm1, Wf) + bf)
o_t = T.nnet.sigmoid(T.dot(x_t, Uo) + T.dot(h_tm1, Wo) + bo)
g_t = T.tanh(T.dot(x_t, Ug) + T.dot(h_tm1, Wg) + bg)
c_t = c_tm1 * f_t + g_t * i_t
h_t = T.tanh(c_t) * o_t
return h_t, c_t
#########################################################################################################################
def forward(self, X):
states, updates = theano.scan(
fn=self.step,
sequences=[ X ],
outputs_info=[self.h_tm1, self.c_tm1],
non_sequences=[
self.Ui, self.Wi, self.bi,
self.Uf, self.Wf, self.bf,
self.Uo, self.Wo, self.bo,
self.Ug, self.Wg, self.bg
]
)
updates = [(self.h_tm1, states[0][-1]), (self.c_tm1, states[1][-1])]
return states, updates
#########################################################################################################################
class LSTMEncoder(LSTM):
def encode(self, X):
states, updates = self.forward(X)
h_t = states[0][-1]
c_t = states[1][-1]
return h_t, c_t, updates
class LSTMDecoder(LSTM):
def __init__(self, size, dim, h_tm1=None, c_tm1=None):
super(LSTMDecoder, self).__init__(size=size, dim=dim)
self.Wh = init_weights((size, dim), "Wh")
self.bh = shared_zeros((minibatch_size_g, dim), "bh")
self.h_tm1 = h_tm1 or shared_zeros((minibatch_size_g, size), "h_tm1")
self.c_tm1 = c_tm1 or shared_zeros((minibatch_size_g, size), "c_tm1")
self.y_t = shared_zeros((minibatch_size_g, dim), "y_t")
# self.decode_length = theano.shared(decode_length)
self.params.append(self.Wh)
self.params.append(self.bh)
def decode_step(
self, y_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg,
Wh, bh
):
h_t, c_t = self.step(
y_t, h_tm1, c_tm1,
Ui, Wi, bi, Uf, Wf, bf,
Uo, Wo, bo, Ug, Wg, bg
)
y_t = T.dot(h_t, Wh) + bh
return y_t, h_t, c_t
def decode(self, h_tm1, c_tm1, timesteps):
outputs, updates = theano.scan(
fn=self.decode_step,
outputs_info=[self.y_t, h_tm1, c_tm1],
non_sequences=[
self.Ui, self.Wi, self.bi,
self.Uf, self.Wf, self.bf,
self.Uo, self.Wo, self.bo,
self.Ug, self.Wg, self.bg,
self.Wh, self.bh
],
n_steps=timesteps
)
updates = [
(self.h_tm1, outputs[1][-1]),
(self.c_tm1, outputs[2][-1])
]
# return T.flatten(outputs[0], 3), updates
return outputs[0], updates
@staticmethod
def argmax(seq):
seq = T.argmax(seq, axis=2)
return seq
#########################################################################################################################
class Seq2Seq(object):
def __init__(self, size, dim):
self.encoder = LSTMEncoder(size, dim)
self.decoder = LSTMDecoder(size, dim)
self.params = []
self.params += self.encoder.params
self.params += self.decoder.params
self._predict = None
self._train = None
self._test = None
def compile(self, loss_func, optimizer):
seq_input = T.tensor3()
seq_target = T.tensor3()
decode_timesteps = T.iscalar()
h_tm1, c_tm1, updates_encode = self.encoder.encode(seq_input)
seq_predict_flex, updates_decode_flex = self.decoder.decode(h_tm1, c_tm1, decode_timesteps)
seq_argmax = self.decoder.argmax(seq_predict_flex)
seq_predict, updates_decode = self.decoder.decode(h_tm1, c_tm1, T.shape(seq_target)[0])
loss = loss_func(seq_predict, seq_target)
self._predict = theano.function([seq_input, decode_timesteps], seq_argmax, updates=updates_encode+updates_decode_flex)
self._test = theano.function([seq_input, seq_target], loss, updates=updates_encode+updates_decode)
updates = []
updates += updates_encode
updates += updates_decode
updates += optimizer(self.params, loss)
self._train = theano.function([seq_input, seq_target], loss, updates = updates)
def predict(self, seq_input, decode_timesteps):
self.encoder.reset_state()
self.decoder.reset_state()
return self._predict(seq_input, decode_timesteps)
def train(self, seq_input, seq_target):
self.encoder.reset_state()
self.decoder.reset_state()
return self._train(seq_input, seq_target)
def test(self, seq_input, seq_target):
self.encoder.reset_state()
self.decoder.reset_state()
return self._test(seq_input, seq_target)
#########################################################################################################################
def train(x, target):
for mini_batch, target in zip(x,target):
mini_batch = mini_batch.astype(dtype)
target = target.astype(dtype)
print("result of train function(loss or update) :", seq2seq.train(mini_batch, target))
#########################################################################################################################
# make weight information to pickle file
# information of Encooder class and decoder class of Seq2Seq class
# Encooder and decoder class should have function that returns value of weight variables
# one list contains elements that save weights' information
def save_weight():
None
#########################################################################################################################
def gen_processed_seq(input_sentence):
tokenized_seq = nltk.word_tokenize( input_sentence )
input_sentences = [ None for _ in range(1) ]
input_sentences[0] = tokenized_seq
seq_input = readFile.word_to_idx(input_sentences, word_to_index_input_g )
sorted_seq_input = [ None for _ in range(minibatch_size_g) ]
sorted_seq_input[0] = seq_input[0]
input_len = len(seq_input[0])
for i in range(minibatch_size_g-1):
for j in range(input_len):
sorted_seq_input[i+1] = [-1]
input_finally = []
input_finally.append(sorted_seq_input)
return input_finally
#########################################################################################################################
def gen_one_hot(input_len, input_seq):
one_hot = readFile.seq_to_1hot(n_time_step_input_g, input_seq, "predict", 1, 1)
one_hot[0] = one_hot[0].astype(dtype)
print("one_hot : ", one_hot)
return one_hot
def get_idx(argmax, num_of_word):
idx_list = argmax[ : num_of_word, 0]
return idx_list
#########################################################################################################################
def predict():
input_sentence = raw_input("Input the English Sentence You Want to Translate into Spanish : ")
input_seq = gen_processed_seq(input_sentence)
print("input_seq[0][0] : ",input_seq[0][0])
num_of_word = len(input_seq[0][0])
one_hot = gen_one_hot(n_time_step_input_g, input_seq)
argmax = seq2seq.predict(one_hot[0] , n_time_step_input_g )
print("argmax_fin shape : ", argmax.shape)
print("argmax_fin : ", argmax)
idx_list_np = get_idx(argmax, num_of_word)
idx_list_py = idx_list_np.tolist()
print("index_to_word_target_g : ",index_to_word_target_g)
print("index_to_word_target_g[6] :", index_to_word_target_g[6])
result = readFile.idx_to_word(idx_list_py, index_to_word_target_g)
translated = ""
for elem in result :
translated += elem
translated += " "
print("translated : " , translated)
print("Translation End")
#########################################################################################################################
def gen_global_var(word_to_index_input, word_to_index_targrt, voca_dim, si, st, index_to_word_target):
global word_to_index_input_g
global word_to_index_targrt_g
global voca_dim_g
global minibatch_size_g
global n_time_step_input_g
global n_timestep_target_g
global index_to_word_target_g
word_to_index_input_g = word_to_index_input
word_to_index_targrt_g = word_to_index_targrt
voca_dim_g = voca_dim + 2
minibatch_size_g = si[0].shape[1]
n_time_step_input_g = si[0].shape[0]
n_timestep_target_g = st[0].shape[0]
index_to_word_target_g = index_to_word_target
return
#########################################################################################################################
def menu(si, st):
None
#########################################################################################################################
def gen_object():
return None
#########################################################################################################################
if __name__ == "__main__":
si, st, maxlen_input, minibatch_size, voca_dim, word_to_index_input, word_to_index_targrt, index_to_word_target = readFile.preprocessing()
gen_global_var(word_to_index_input, word_to_index_targrt, voca_dim, si, st, index_to_word_target)
seq2seq = Seq2Seq(n_time_step_input_g, voca_dim_g )
seq2seq.compile(loss_func=categorical_crossentropy, optimizer=adadelta)
while(True):
print("select a menu")
print("1. Training")
print("2. Translate specific English sentence into Spanish.")
val = input("selection : ")
if val == 1:
train(si, st)
elif val == 2:
predict()
And readFile.py is:
import numpy as np
import itertools
import nltk
import sys
import os
from nltk.tokenize import sent_tokenize
import codecs
unknown_token = 'UNKNOWN_TOKEN'
start_token = '_S'
end_token = '__E'
num_of_seq = 0
input_path = "./europarl-v7.es-en.en"
target_path = "./europarl-v7.es-en.es"
minibatch_unit = 100
voca_dim = 3000
SEQ_NUM_LIMIT = 1000
##########################################################################################
def file_tokenize(file):
f = codecs.open( file, "r", "utf-8" )
tokenized_seq = []
sentences = []
total_sentence_num = 0
# sequence tokenize
for i,line in enumerate(f):
print("tokenized Sentence No." , i)
# strip() method to remove the newline character at the end of the input line.
tokenized_seq = nltk.word_tokenize( line.strip() )
tokenized_seq.insert(0, start_token)
tokenized_seq.append(end_token)
sentences.append(tokenized_seq)
total_sentence_num += 1;
if(total_sentence_num == SEQ_NUM_LIMIT):
break
return sentences,total_sentence_num
##########################################################################################
# Count the word frequencies
def cntWordFreq(sentences):
word_freq = nltk.FreqDist(itertools.chain(*sentences))
return word_freq
##########################################################################################
# Get the most common words and build index_to_word and word_to_index vectors
def build_WordToIdx_IdxtoWord(word_freq):
vocab = word_freq.most_common(voca_dim-1)
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = dict([(w,i) for i,w in enumerate(index_to_word)])
return index_to_word, word_to_index
##########################################################################################
# change word to index
def word_to_idx(sequences, word_to_index ) :
for i, sent in enumerate(sequences):
sequences[i] = [w if w in word_to_index else unknown_token for w in sent]
sequences[i] = [word_to_index[w] if w in word_to_index else -1 for w in sequences[i]]
return sequences
##########################################################################################
def idx_to_word(seq, index_to_word):
for i, sent in enumerate(seq):
seq[i] = index_to_word[sent]
#seq[i] = [index_to_word[sent] if sent in index_to_word else '?' ]
return seq
##########################################################################################
def sortByLen(seqs_input, seqs_target) :
# check maximum sentence length
max_len_input = 0
max_len_target = 0
for sentence in seqs_input :
tmp = len(sentence)
if max_len_input < tmp:
max_len_input = tmp
for sentence in seqs_target :
tmp = len(sentence)
if max_len_target < tmp:
max_len_target = tmp
seqs_sorted_input = [ [] for _ in range(max_len_input+1) ]
seqs_sorted_target = [ [] for _ in range(max_len_input+1) ]
i = 0
for sentence_input, sentence_target in zip(seqs_input, seqs_target) :
sentence_len = len(sentence_input)
seqs_sorted_input[sentence_len].append(sentence_input)
seqs_sorted_target[sentence_len].append(sentence_target)
i+=1
return seqs_sorted_input, seqs_sorted_target, max_len_input, max_len_target
##########################################################################################
def find_maxlen(sentence_group):
max_seq_len = 0
for seq in sentence_group :
if len(seq) > max_seq_len :
max_seq_len = len(seq)
return max_seq_len
##########################################################################################
def sort_by_timestep(sentence_group):
same_len_seq = np.asarray(sentence_group)
same_len_seq = apply_to_m1(same_len_seq)
sorted_seq = same_len_seq.transpose()
return sorted_seq
##########################################################################################
def seq_to_1hot(max_len, sorted_sentences, type, minibatch_unit, num_of_seq):
one_hot = [None for _ in range( len(sorted_sentences) )]
for i, sentence_group in enumerate(sorted_sentences):
if sentence_group and len(sentence_group[0]) != 0 :
max_seq_len = find_maxlen(sentence_group)
row = max_seq_len * minibatch_unit
one_hot[i] = np.zeros( (row, voca_dim + 2) )
time_step_seq = sort_by_timestep(sentence_group)
j = 0
for word_idx in np.nditer( time_step_seq ) :
if word_idx != -1:
one_hot[i][j][word_idx] = 1
j+=1
one_hot[i] = np.reshape(one_hot[i], ( max_seq_len, -1, voca_dim+2) )
return one_hot
##########################################################################################
def apply_to_m1(lst, dtype=np.int64):
inner_max_len = max(map(len, lst))
result = np.zeros( [len(lst), inner_max_len], dtype )
result[:] = -1
for i, row in enumerate(lst):
for j, val in enumerate(row):
result[i][j] = val
return result
##########################################################################################
def seq_group_by_mini_batch_size(minibatch_unit, sorted_seq, num_of_seq):
idx = 0
cnt = 0
minibatch_seq = [ [] for _ in range( (num_of_seq/minibatch_unit)+1) ]
for seqs in sorted_seq :
if seqs :
for seq in seqs :
if seq:
minibatch_seq[idx].append(seq)
cnt+=1
if minibatch_unit == cnt:
cnt = 0
idx+= 1
for i, seq in enumerate (minibatch_seq):
if seq == []:
minibatch_seq = minibatch_seq[: i- 1]
break
return minibatch_seq
##########################################################################################
def preprocessing():
global num_of_seq
global minibatch_unit
global input_path
global target_path
print("Start Preprocessing")
sentences_input, total_sentence_num = file_tokenize(input_path)
sentences_target, total_sentence_num_target = file_tokenize(target_path)
print("FINISHED : file_tokenize ")
word_freq_input = cntWordFreq(sentences_input)
word_freq_target = cntWordFreq(sentences_target)
print("FINISHED : cntWordFreq ")
index_to_word_input, word_to_index_input = build_WordToIdx_IdxtoWord(word_freq_input)
index_to_word_target, word_to_index_targrt = build_WordToIdx_IdxtoWord(word_freq_target)
print("FINISHED : build_WordToIdx_IdxtoWord ")
seqs_input = word_to_idx(sentences_input, word_to_index_input)
seqs_target = word_to_idx(sentences_target, word_to_index_targrt)
print("FINISHED : word_to_idx ")
seqs_sorted_input, seqs_sorted_target, maxlen_input, maxlen_target = sortByLen(seqs_input, seqs_target)
print("FINISHED : sortByLen ")
for seqs in seqs_input:
if seqs:
for seq in seqs:
if seq:
num_of_seq+=1
seq_by_mini_batch_size_input = seq_group_by_mini_batch_size(minibatch_unit, seqs_sorted_input, num_of_seq)
seq_by_mini_batch_size_target = seq_group_by_mini_batch_size(minibatch_unit, seqs_sorted_target, num_of_seq)
print("FINISHED : seq_group_by_mini_batch_size ")
_1hot_input = seq_to_1hot(maxlen_input, seq_by_mini_batch_size_input, "input",minibatch_unit, num_of_seq)
_1hot_target = seq_to_1hot(maxlen_target, seq_by_mini_batch_size_target, "target",minibatch_unit, num_of_seq)
print("FINISHED : seq_to_1hot ")
if minibatch_unit > total_sentence_num:
minibatch_unit = total_sentence_num
print("exit preprocessing")
return _1hot_input, _1hot_target, maxlen_input, minibatch_unit, voca_dim, word_to_index_input, word_to_index_targrt, index_to_word_target
