How to assign values to a tensor based on tensor indices - PyTorch

import torch
couplings_lookup = torch.tensor(range(1,901)).reshape(6,6,5,5)
fccl = torch.tensor([1,2,3,4,5,0])
xx = torch.tensor([3,2,2,2,1,0])
coupling = []
for idx, pos in enumerate(fccl):
    ccoupling_mask = torch.zeros(couplings_lookup.shape)
    ccoupling_mask[fccl[:idx],pos,xx[:idx],:] = 1
    ccoupling = torch.sum(couplings_lookup * ccoupling_mask, axis=(0,1,2))
    coupling.append(ccoupling)
    print(ccoupling)
As the sample above shows, how could I convert the for loop into a tensor operation in PyTorch (or einsum) so the program runs faster? Or how should I think about problems like this?
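One way to think about it: write down what a single iteration produces in closed form. Here, iteration idx computes the sum over i < idx of couplings_lookup[fccl[i], fccl[idx], xx[i], :], which is a gather (advanced indexing) followed by an exclusive prefix sum. A minimal sketch of that idea, assuming the (fccl[i], xx[i]) index pairs are distinct (as in the sample data) so the 0/1 mask behaves like a plain sum:
import torch

couplings_lookup = torch.tensor(range(1, 901)).reshape(6, 6, 5, 5)
fccl = torch.tensor([1, 2, 3, 4, 5, 0])
xx = torch.tensor([3, 2, 2, 2, 1, 0])

# Gather G[i, j, :] = couplings_lookup[fccl[i], fccl[j], xx[i], :] for all pairs (i, j).
G = couplings_lookup[fccl[:, None], fccl[None, :], xx[:, None], :]      # shape (6, 6, 5)

# The loop sums rows i < idx, i.e. an exclusive cumulative sum along dim 0.
csum = torch.cumsum(G, dim=0)
exclusive = torch.cat([torch.zeros_like(csum[:1]), csum[:-1]], dim=0)

# Pick column j = idx for each row idx: coupling[idx] = sum over i < idx of G[i, idx, :].
n = fccl.shape[0]
coupling = exclusive[torch.arange(n), torch.arange(n), :]               # shape (6, 5)
print(coupling)   # each row should match the ccoupling printed by the loop (up to dtype)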

Related

How can I interpolate values from two lists (in Python)?

I am relatively new to coding in Python. I have mainly used MATLAB in the past and am used to having vectors that can be referenced explicitly rather than appended lists. I have a script where I generate a list of x- and y- (z-, v-, etc.) values. Later, I want to interpolate and then print a table of the values at specified points. Here is a MWE. The problem is at line 48:
yq = interp1d(x_list, y_list, xq(nn))#interp1(output1(:,1),output1(:,2),xq(nn))
I'm not sure I have the correct syntax for the last two lines either:
table[nn] = ('%.2f' %xq, '%.2f' %yq)
print(table)
Here is the full script for the MWE:
#This script was written to test how to interpolate after data was created in a loop and stored as a list. Can a list be accessed explicitly like a vector in matlab?
#
from scipy.interpolate import interp1d
from math import * #for ceil
from astropy.table import Table #for Table
import numpy as np
# define the initial conditions
x = 0 # initial x position
y = 0 # initial y position
Rmax = 10 # maximum range
""" initializing variables for plots"""
x_list = [x]
y_list = [y]
""" define functions"""
# not necessary for this MWE
"""create sample data for MWE"""
# x and y data are calculated using functions and appended to their respective lists
h = 1
t = 0
tf = 10
N=ceil(tf/h)
# Example of interpolation without a loop: https://docs.scipy.org/doc/scipy/tutorial/interpolate.html#d-interpolation-interp1d
#x = np.linspace(0, 10, num=11, endpoint=True)
#y = np.cos(-x**2/9.0)
#f = interp1d(x, y)
for i in range(N):
    x = h*i
    y = cos(-x**2/9.0)
    """ appends selected data for ability to plot"""
    x_list.append(x)
    y_list.append(y)
## Interpolation after x- and y-lists are already created
intervals = 0.5
nfinal = ceil(Rmax/intervals)
NN = nfinal+1 # length of table
dtype = [('Range (units?)', 'f8'), ('Drop? (units)', 'f8')]
table = Table(data=np.zeros(N, dtype=dtype))
for nn in range(NN):#for nn = 1:NN
    xq = 0.0 + (nn-1)*intervals #0.0 + (nn-1)*intervals
    yq = interp1d(x_list, y_list, xq(nn))#interp1(output1(:,1),output1(:,2),xq(nn))
    table[nn] = ('%.2f' %xq, '%.2f' %yq)
print(table)
Your help and patience will be greatly appreciated!
Best regards,
Alex
Your code has some glaring issues that made it really difficult to understand. Let's first take a look at some things I needed to fix:
for i in range(N):
    x = h*1
    y = cos(-x**2/9.0)
    """ appends selected data for ability to plot"""
    x_list.append(x)
    y_list.append(y)
You are appending a single value without modifying it. What I presume you wanted is down below.
intervals = 0.5
nfinal = ceil(Rmax/intervals)
NN = nfinal+1 # length of table
dtype = [('Range (units?)', 'f8'), ('Drop? (units)', 'f8')]
table = Table(data=np.zeros(N, dtype=dtype))
for nn in range(NN):#for nn = 1:NN
    xq = 0.0 + (nn-1)*intervals #0.0 + (nn-1)*intervals
    yq = interp1d(x_list, y_list, xq(nn))#interp1(output1(:,1),output1(:,2),xq(nn))
    table[nn] = ('%.2f' %xq, '%.2f' %yq)
This is where things get strange. First: use pandas tables; that is the more popular choice. Second: I have no idea what you are trying to loop over. What I presume you wanted was to vary the number of points for the interpolation, which I have done below. Third: you are trying to interpolate a single point, when you probably want to interpolate over a range of points (hence... interpolation). Lastly, you are using the interp1d function incorrectly. Please take a look at the code below and run it; let me know exactly what you wanted (specifically: what should xq / xq(nn) be?), because the MRE you provided is quite confusing.
from scipy.interpolate import interp1d
from math import *
import numpy as np
Rmax = 10
h = 1
t = 0
tf = 10
N = ceil(tf/h)
x = np.arange(0,N+1)
y = np.cos(-x**2/9.0)
interval = 0.5
NN = ceil(Rmax/interval) + 1
ip_list = np.arange(1,interval*NN,interval)
xtable = []
ytable = []
for i,nn in enumerate(ip_list):
    f = interp1d(x,y)
    x_i = np.arange(0,nn+interval,interval)
    xtable += [x_i]
    ytable += [f(x_i)]
[print(i) for i in xtable]
[print(i) for i in ytable]
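If the intent was just to tabulate the interpolated drop at every half unit of range, another reading of the MWE is to build the interpolant once and evaluate it at an array of query points. A minimal sketch of that interpretation; the column names and the 0.5 step come from the question, everything else is an assumption about what xq should be:
from math import ceil, cos
import numpy as np
from scipy.interpolate import interp1d
from astropy.table import Table

h, tf, Rmax, intervals = 1, 10, 10, 0.5
x_list = [h*i for i in range(ceil(tf/h) + 1)]
y_list = [cos(-x**2/9.0) for x in x_list]

f = interp1d(x_list, y_list)                        # build the interpolant once
xq = np.arange(0.0, Rmax + intervals, intervals)    # query points 0.0, 0.5, ..., 10.0
yq = f(xq)                                          # evaluate at all query points at once

table = Table([xq, yq], names=('Range (units?)', 'Drop? (units)'))
print(table)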

Perform principal component analysis on a moving window of data

This was partly answered by #WhoIsJack but not completely solved, given the errors I get. Basically, I'm trying to perform principal component analysis on a rolling window of data. For example, I'd run PCA on the last 200 days in the df, move forward 1 day, and run PCA again on the last 200 days. So as you move forward each day, you include the next day's measurement and drop the oldest one.
You have a random df:
data = np.random.random(size=(1000,10))
df = pd.DataFrame(data)
Here's the window size:
window = 200
Initialize an empty df of appropriate size for the output
df_pca = pd.DataFrame( np.zeros((data.shape[0] - window + 1, data.shape[1])) )
Define PCA fit-transform function. Instead of attempting to return the result, it is written into the previously created output array.
def rolling_pca(window_data):
    pca = PCA()
    transf = pca.fit_transform(df.iloc[window_data])
    df_pca.iloc[int(window_data[0])] = transf[0,:]
    return True
Create a df containing row indices for the workaround
df_idx = pd.DataFrame(np.arange(df.shape[0]))
Use rolling to apply the PCA function
_ = df_idx.rolling(window).apply(rolling_pca)
The results should be contained here:
print(df_pca)
However, when I generate the results, only the first row of data appears to contain principal components, while the rest of the rows are zero.
I also tried the following function:
def rolling_pca(x, window):
    r = x.rolling(window=window)
    pca = PCA(3)
    y = pca.fit(r)
    z = pca.fit_transform(y)
    return z
window = 200
Which I thought would generate a new df with rolling PCAs:
data = df.apply(rolling_pca, window=window)
But I got the following error: setting an array element with a sequence.
I've also tried manually calculating with below. I get: "unsupported operand type(s) for /: 'Rolling' and 'int'"
def rolling_pca(x, window):
    # create rolling dataframe
    r = x.rolling(window=window)
    # demand data
    X = np.matrix(r)
    X_dm = X - np.mean(X, axis = 0)
    #Eigenvalue decomposition (of covariance matrix)
    Cov_X = np.cov(X_dm, rowvar = False)
    eigen = np.linalg.eig(Cov_X)
    eig_values_X = np.matrix(eigen[0])
    eig_vectors_X = np.matrix(eigen[1])
    #transformed data
    Y_dm = X_dm * eig_vectors_X
    #assign transformed yields
    yields_trans = Y_dm.copy()
    # get PCs
    pc1_yields = x.copy()
    pcas = yields_trans[:,0:3]
    return pcas
#assign window length
window = 300
rolling_pca(data, window=window)
And tried below. Get error: "LinAlgError: 0-dimensional array given. Array must be at least two-dimensional"
def pca(x):
    # demand data
    X = np.matrix(x.values)
    X_dm = X - np.mean(X, axis = 0)
    #Eigenvalue decomposition (of covariance matrix)
    Cov_X = np.cov(X_dm, rowvar = False)
    eigen = np.linalg.eig(Cov_X)
    eig_values_X = np.matrix(eigen[0])
    eig_vectors_X = np.matrix(eigen[1])
    #transformed data
    Y_dm = X_dm * eig_vectors_X
    #assign transformed yields
    yields_trans = Y_dm.copy()
    # get 3 PCs
    pcas = yields_trans[:,0:3]
    final_pcas = pd.DataFrame(pcas)
    return final_pcas
data.rolling(200).apply(pca)
Any thoughts would be appreciated!
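One thing worth noting: pandas' rolling().apply hands the function one column's window at a time, so a PCA over all ten columns cannot be computed inside it directly; that is why the index-DataFrame workaround exists. A minimal sketch of an explicit loop over window positions, assuming the goal is the first three principal components of the newest row in each 200-day window (swap transf[-1, :] for transf[0, :], or keep the whole matrix, depending on what you actually need):
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

data = np.random.random(size=(1000, 10))
df = pd.DataFrame(data)
window = 200

rows = []
for end in range(window, len(df) + 1):
    pca = PCA(n_components=3)
    transf = pca.fit_transform(df.iloc[end - window:end])  # PCA on this 200-row window
    rows.append(transf[-1, :])                             # keep the newest row's 3 PCs

df_pca = pd.DataFrame(rows, index=df.index[window - 1:], columns=['PC1', 'PC2', 'PC3'])
print(df_pca)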

How to create an initial population of 20 chromosomes with Python 3

I'm trying to generate the initial population for a genetic algorithm. I need to generate 20 random binary strings of length 18. I have been able to generate just one string. My question is: how do I use another loop in order to generate the 20 strings that I need?
I think this could be solved using nested loops. I've tried to do that but I don't know how to use them correctly.
import random
binaryString = []
for i in range(0, 18):
    x = str(random.randint(0, 1))
    binaryString.append(x)
print (''.join(binaryString))
import numpy as geek
num_bits = 18
individualsPer_pop = 20
#Defining the population size
pop_size = (individualsPer_pop,num_bits) # The population will have individualsPer_pop chromosomes, where each chromosome has num_bits genes.
#Creating the initial population.
new_population = geek.random.randint(low = 0, high = 2, size = pop_size)
print(new_population)
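Since the question explicitly asks how to add another loop, here is a minimal nested-loop sketch built from the single-string code above (plain Python, no NumPy):
import random

num_bits = 18
individualsPer_pop = 20

population = []
for _ in range(individualsPer_pop):       # outer loop: one chromosome per individual
    binaryString = []
    for _ in range(num_bits):             # inner loop: one random bit (gene) per position
        binaryString.append(str(random.randint(0, 1)))
    population.append(''.join(binaryString))

for chromosome in population:
    print(chromosome)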

Finding conditional mutual information from 3 discrete variables

I am trying to find the conditional mutual information between three discrete random variables using the pyitlib package for Python, with the help of the formula:
I(X;Y|Z) = H(X|Z) + H(Y|Z) - H(X,Y|Z)
The expected conditional mutual information value is 0.011.
My 1st code:
import numpy as np
from pyitlib import discrete_random_variable as drv
X=[0,1,1,0,1,0,1,0,0,1,0,0]
Y=[0,1,1,0,0,0,1,0,0,1,1,0]
Z=[1,0,0,1,1,0,0,1,1,0,0,1]
a=drv.entropy_conditional(X,Z)
##print(a)
b=drv.entropy_conditional(Y,Z)
##print(b)
c=drv.entropy_conditional(X,Y,Z)
##print(c)
p=a+b-c
print(p)
The answer I am getting here is 0.4632245116328402.
My 2nd code:
import numpy as np
from pyitlib import discrete_random_variable as drv
X=[0,1,1,0,1,0,1,0,0,1,0,0]
Y=[0,1,1,0,0,0,1,0,0,1,1,0]
Z=[1,0,0,1,1,0,0,1,1,0,0,1]
a=drv.information_mutual_conditional(X,Y,Z)
print(a)
The answer I am getting here is 0.1583445441575102, while the expected result is 0.011.
Can anybody help? I am in big trouble right now; any kind of help would be appreciated.
Thanks in advance.
I think the library function entropy_conditional(x,y,z) has some errors; I tested it on my own samples and the same problem happens.
However, entropy_conditional with two variables works fine.
So I coded my own entropy_conditional(x,y,z) as entropy(x,y,z), and the result looks correct.
The code may not be beautiful.
import math
import numpy as np
from pyitlib import discrete_random_variable as drv
def gen_dict(x):
    dict_z = {}
    for key in x:
        dict_z[key] = dict_z.get(key, 0) + 1
    return dict_z
def entropy(x,y,z):
    x = np.array([x,y,z]).T
    x = x[x[:,-1].argsort()] # sorted by the last column
    w = x[:,-3]
    y = x[:,-2]
    z = x[:,-1]
    # dict_w = gen_dict(w)
    # dict_y = gen_dict(y)
    dict_z = gen_dict(z)
    list_z = [dict_z[i] for i in set(z)]
    p_z = np.array(list_z)/sum(list_z)
    pos = 0
    ent = 0
    for i in range(len(list_z)):
        w = x[pos:pos+list_z[i],-3]
        y = x[pos:pos+list_z[i],-2]
        z = x[pos:pos+list_z[i],-1]
        pos += list_z[i]
        list_wy = np.zeros((len(set(w)),len(set(y))), dtype = float , order ="C")
        list_w = list(set(w))
        list_y = list(set(y))
        for j in range(len(w)):
            pos_w = list_w.index(w[j])
            pos_y = list_y.index(y[j])
            list_wy[pos_w,pos_y] += 1
            #print(pos_w)
            #print(pos_y)
        list_p = list_wy.flatten()
        list_p = np.array([k for k in list_p if k>0]/sum(list_p))
        ent_t = 0
        for j in list_p:
            ent_t += -j * math.log2(j)
        #print(ent_t)
        ent += p_z[i]* ent_t
    return ent
X=[0,1,1,0,1,0,1,0,0,1,0,0]
Y=[0,1,1,0,0,0,1,0,0,1,1,0]
Z=[1,0,0,1,1,0,0,1,1,0,0,1]
a=drv.entropy_conditional(X,Z)
##print(a)
b=drv.entropy_conditional(Y,Z)
c = entropy(X, Y, Z)
p=a+b-c
print(p)
0.15834454415751043
Based on the definitions of conditional entropy, calculating in bits (i.e. base 2) I obtain H(X|Z)=0.784159, H(Y|Z)=0.325011, H(X,Y|Z) = 0.950826. Based on the definition of conditional mutual information you provide above, I obtain I(X;Y|Z)=H(X|Z)+H(Y|Z)-H(X,Y|Z)= 0.158344. Noting that pyitlib uses base 2 by default, drv.information_mutual_conditional(X,Y,Z) appears to be computing the correct result.
Note that your use of drv.entropy_conditional(X,Y,Z) in your first example to compute the conditional entropy is incorrect. You can, however, use drv.entropy_conditional(XY,Z), where XY is a 1D array representing the joint observations of X and Y, for example XY = [2*xy[0] + xy[1] for xy in zip(X,Y)].
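A minimal sketch of that check with the data from the question; the 2*x + y encoding simply gives each (x, y) pair its own integer symbol, so nothing here goes beyond the pyitlib calls already used above:
from pyitlib import discrete_random_variable as drv

X = [0,1,1,0,1,0,1,0,0,1,0,0]
Y = [0,1,1,0,0,0,1,0,0,1,1,0]
Z = [1,0,0,1,1,0,0,1,1,0,0,1]

XY = [2*x + y for x, y in zip(X, Y)]            # joint observations of (X, Y) as single symbols

a = drv.entropy_conditional(X, Z)               # H(X|Z)
b = drv.entropy_conditional(Y, Z)               # H(Y|Z)
c = drv.entropy_conditional(XY, Z)              # H(X,Y|Z)

print(a + b - c)                                    # I(X;Y|Z) via the identity above
print(drv.information_mutual_conditional(X, Y, Z))  # should agree, about 0.158 bits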

AttributeError: 'DType' object has no attribute 'type' Tensorflow Serving

I am trying to use a function (from another module) inside TensorFlow. The function accepts a NumPy array and returns the changepoints. My main goal is to deploy this model on TensorFlow Serving. I am running into the error
AttributeError: 'DType' object has no attribute 'type'
There are 2 functions: one is create_data(), which creates a numpy array and returns it; the other is change(), which accepts a numpy array and uses the previously mentioned function to return changepoints. I have created a placeholder to accept input data and an operation to execute the function. The problem is that if I try to send data through the placeholder, I run into an error; if I send the data directly into the function, it runs. Following is my code.
def create_data():
    np.random.seed(0)
    size = 100
    mean_a = 0.0
    mean_b = 10.0
    mean_c = 0
    var = 0.1
    data_a = np.random.normal(mean_a, var, size)
    data_b = np.random.normal(mean_b, var, size)
    data_c = np.random.normal(mean_c, var, size)
    data = np.concatenate([data_a, data_b, data_c])
    return data
def change(data):
    # what else i tried
    # data = np.array(data, dtype=np.float)
    # above line gives another error mentioned after code
    cpts = (pelt(normal_mean(x, np.var(x)), len(x)))
    return cpts
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[300, ], name="myInput")
y = tf.convert_to_tensor(change(x),np.float32,name="myOutput")
z = sess.run(y,feed_dict={x:create_data()})
If I try the line data = np.array(data, dtype=np.float) in the function change(), it gives me the error
ValueError: setting an array element with a sequence.
I also tried data = np.hstack((data)).astype(np.float) and data = np.vstack((data)).astype(np.float), but those run into a separate error that says to use tf.map_fn. I also tried to use tf.eval() to convert the numbers, but I couldn't get them to run inside a function with placeholders.
But if I send in the output directly,
y = tf.convert_to_tensor(change(create_data()),np.float32,name="myOutput")
It works.
How should I send in the input to make it work?
EDIT: The function in question is this if anyone wants to know.
This error is raised when you try to pass a Tensor into a NumPy function.
You need to use tf.py_func to include a Python function in the TensorFlow graph.
(Also, your change() function takes data as its argument but uses x inside.)
Here is the code that worked for me:
import numpy as np
import tensorflow as tf
from changepy import pelt
from changepy.costs import normal_mean
def create_data():
    np.random.seed(0)
    size = 100
    mean_a = 0.0
    mean_b = 10.0
    mean_c = 0
    var = 0.1
    data_a = np.random.normal(mean_a, var, size)
    data_b = np.random.normal(mean_b, var, size)
    data_c = np.random.normal(mean_c, var, size)
    data = np.concatenate([data_a, data_b, data_c])
    return data
def change(x):
    # changepoint detection on the input array, as in the question
    cpts = (pelt(normal_mean(x, np.var(x)), len(x)))
    return cpts
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[300, ], name="myInput")
# wrap the Python function with py_func so it receives the placeholder's value as a numpy array
y = tf.convert_to_tensor(tf.compat.v1.py_func(change, [x], 3*[tf.int64]),np.float32,name="myOutput")
z = sess.run(y,feed_dict={x:create_data()})
print(z)
