adding 2 more dimensions to tensor - pytorch

I am getting the following error message:
RuntimeError: Given groups=3, expected weight to be at least 3 at dimension 0, but got weight of size [1, 1, 2, 2] instead
when I try to convolve an image with a filter using the functional version of conv2d.
I know why I am receiving this error message... it is because the weight needs a size of 3 at dimension 0. But I have no idea how to add the two extra dimensions.
I have flailed around for quite some time trying to add them, but I can't figure it out. I want the kernel applied to all color channels... so I just want to replicate it 2 more times.
import torch.nn as nn
import torch
import torch.nn.functional as nnf
from PIL import Image
from torchvision import transforms
img = Image.open("GOPR0305.jpg")
preprocess = transforms.Compose([transforms.ToTensor()])
img_t = preprocess(img)
img_t = torch.unsqueeze(img_t, 0)
hci = [1, -1]
hri = [-1, 1]
hc = [1.0, -1.0]
hr = [-1.0, 1.0]
lc = [0.5, 0.5]
lr = [0.5, 0.5]
hh_k = torch.tensor([hc ,hr])[None, None, ...]
hl_k = torch.tensor([hc ,lr])[None, None, ...]
lh_k = torch.tensor([lc ,hr])[None, None, ...]
ll_k = torch.tensor([lc ,lr])[None, None, ...]
in_t = torch.tensor([ [14, 7, 6, 2,] , [4 ,8 ,11 ,1], [3, 5, 9 ,10], [12, 15, 16, 13] ])[None, None, ...]
in_t = torch.tensor([ [14.0, 7.0, 6.0, 2.0,] , [4.0 ,8.0 ,11.0 ,1.0], [3.0, 5.0, 9.0 ,10.0], [12.0, 15.0, 16.0, 13.0] ])[None, None, ...]
def wave_haar(in_t):
    hh = nnf.conv2d(in_t, hh_k, stride=2, groups=3)
    ll = nnf.conv2d(in_t, ll_k, stride=2)
    hl = nnf.conv2d(in_t, hl_k, stride=2)
    lh = nnf.conv2d(in_t, lh_k, stride=2)
    return [ll, hl, lh, hh]
[ll,hl,lh,hh] = wave_haar(img_t[:,2:])
print(img_t.shape)
print(img_t.size())
print(img_t)
print(img_t.shape)
print(ll.shape)

Changing the groups argument of the first convolution to 1 fixed the problem for me, like this:
def wave_haar(in_t):
    hh = nnf.conv2d(in_t, hh_k, stride=2, groups=1)
    ll = nnf.conv2d(in_t, ll_k, stride=2)
    hl = nnf.conv2d(in_t, hl_k, stride=2)
    lh = nnf.conv2d(in_t, lh_k, stride=2)
    return [ll, hl, lh, hh]

I was able to solve this by changing the kernel to have size 3 at dimension 0:
from
hh_k = torch.tensor([hc, hr])[None, None, ...]
to
hh_k3 = torch.tensor([[[hc, hr]], [[hc, hr]], [[hc, hr]]])
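The same result can be obtained by replicating the single-channel kernel along dimension 0 (a minimal sketch, assuming the hh_k and the 3-channel img_t defined above), which gives the [3, 1, 2, 2] weight shape that groups=3 expects:
hh_k3 = hh_k.repeat(3, 1, 1, 1)                    # [1, 1, 2, 2] -> [3, 1, 2, 2]
hh = nnf.conv2d(img_t, hh_k3, stride=2, groups=3)  # kernel applied to each color channel separately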

Related

Adding image generated from another library as inset in matplotlib

I've generated a network figure using the vedo library and I'm trying to add it as an inset to a figure generated in matplotlib:
import networkx as nx
import matplotlib.pyplot as plt
from vedo import *
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
G = nx.gnm_random_graph(n=10, m=15, seed=1)
nxpos = nx.spring_layout(G, dim=3, seed=1)
nxpts = [nxpos[pt] for pt in sorted(nxpos)]
nx_lines = [(nxpts[i], nxpts[j]) for i, j in G.edges()]
pts = Points(nxpts, r=12)
edg = Lines(nx_lines).lw(2)
# node values
values = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[30, 80, 10, 79, 70, 60, 75, 78, 65, 10],
[1, .30, .10, .79, .70, .60, .75, .78, .65, .90]]
time = [0.0, 0.1, 0.2] # in seconds
vplt = Plotter(N=1)
pts1 = pts.cmap('Blues', values[0])
vplt.show(
    pts1, edg,
    axes=False,
    bg='white',
    at=0,
    interactive=False,
    zoom=1.5
).screenshot("network.png")
ax = plt.subplot(111)
ax.plot(
    [1, 2, 3], [1, 2, 3],
    'go-',
    label='line 1',
    linewidth=2
)
arr_img = vplt.screenshot(returnNumpy=True, scale=1)
im = OffsetImage(arr_img, zoom=0.25)
ab = AnnotationBbox(im, (1, 0), xycoords='axes fraction', box_alignment=(1.1, -0.1), frameon=False)
ax.add_artist(ab)
plt.show()
ax.figure.savefig(
    "output.svg",
    transparent=True,
    dpi=600,
    bbox_inches="tight"
)
The resolution of the image in the inset is too low. Suggestions on how to add the inset without loss of resolution would be really helpful.
EDIT:
The answer posted below works for adding a 2D network, but I am still looking for ways that will be useful for adding a 3D network in the inset.
I am not familiar with vedo, but the general procedure would be to create an inset axes and plot the image with imshow. However, your code uses networkx, which has matplotlib bindings, so you can do this directly without vedo.
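For the first route, a minimal sketch of the inset-axes-plus-imshow idea (reusing arr_img, vplt and ax from the question's code; rendering the screenshot at a larger scale, e.g. scale=4, is what would actually raise the inset's resolution):
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# render the vedo screenshot with more pixels, then show it in an inset axes
arr_img = vplt.screenshot(returnNumpy=True, scale=4)
axins = inset_axes(ax, width="40%", height="40%", loc="upper right")
axins.imshow(arr_img)
axins.axis("off")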
EDIT: code edited for 3d plotting
import networkx as nx
import matplotlib.pyplot as plt
G = nx.gnm_random_graph(n=10, m=15, seed=1)
nxpos = nx.spring_layout(G, dim=3, seed=1)
nxpts = [nxpos[pt] for pt in sorted(nxpos)]
nx_lines = [(nxpts[i], nxpts[j]) for i, j in G.edges()]
# node values
values = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[30, 80, 10, 79, 70, 60, 75, 78, 65, 10],
[1, .30, .10, .79, .70, .60, .75, .78, .65, .90]]
time = [0.0, 0.1, 0.2] # in seconds
fig, ax = plt.subplots()
ax.plot(
    [1, 2, 3], [1, 2, 3],
    'go-',
    label='line 1',
    linewidth=2
)
from mpl_toolkits.mplot3d import (Axes3D)
from matplotlib.transforms import Bbox
rect = [.6, 0, .5, .5]
bbox = Bbox.from_bounds(*rect)
inax = fig.add_axes(bbox, projection = '3d')
# inax = add_inset_axes(,
# ax_target = ax,
# fig = fig, projection = '3d')
# inax.axis('off')
# set angle
angle = 25
inax.view_init(10, angle)
# hide axes, make transparent
# inax.set_facecolor('none')
# inax.grid('off')
import numpy as np
# plot 3d
seen = set()
for i, j in G.edges():
    x = np.stack((nxpos[i], nxpos[j]))
    inax.plot(*x.T, color = 'k')
    if i not in seen:
        inax.scatter(*x[0], color = 'skyblue')
        seen.add(i)
    if j not in seen:
        inax.scatter(*x[1], color = "skyblue")
        seen.add(j)
fig.show()

Is there any Softmax implementation with sections along the dim (blocky Softmax) in PyTorch?

For example, given logits, dim, and boundary,
boundary = torch.tensor([[0, 3, 4, 8, 0],
                         [1, 3, 5, 7, 9]])
# representing sections look like:
# [[00012222_]
# [_00112233]
# in shape: (2, 9)
# (sections cannot be sliced)
logits = torch.rand(2, 9, 100)
result = blocky_softmax(logits, dim = 1, boundary = boundary)
# result[:, :, 0] may look like:
# [[0.33, 0.33, 0.33, 1.00, 0.25, 0.25, 0.25, 0.25, 0.0 ]
# [0.0, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50]]
# the other 99 slices look similar, with each block summing to 1.
We want the softmax to be applied along dim = 1, but restricted to the sections defined along that dim.
My current PyTorch implementation uses a for loop. It is slow and costs too much memory; it looks like:
def blocky_softmax(logits, splits, map_inf_to = None):
    _, batch_len, _ = logits.shape
    exp_logits = logits.exp() # [2, 9, 100]
    batch_seq_idx = torch.arange(batch_len, device = logits.device)[None, :]
    base = torch.zeros_like(logits)
    _, n_blocks = splits.shape
    for nid in range(1, n_blocks):
        start = splits[:, nid - 1, None]
        end = splits[:, nid, None]
        area = batch_seq_idx >= start
        area &= batch_seq_idx < end
        area.unsqueeze_(dim = 2)
        # per-block sum of exp(logits), broadcast back over the block's positions
        blocky_z = (area * exp_logits).sum(dim = 1, keepdim = True)
        blocky_z = area * blocky_z
        base = base + blocky_z
    if map_inf_to is not None:
        good_base = base > 0
        ones = torch.ones_like(base)
        base = torch.where(good_base, base, ones)
        exp_logits = torch.where(good_base, exp_logits, ones * map_inf_to)
    return exp_logits / base
This implementation is slowed down, and its memory use blown up, by a factor of n_blocks, even though the sections could be processed in parallel.
If there is no off-the-shelf function, should I write a CUDA/C++ library? I hope you can help with my issue.
For further generalization, I would like the boundary/sections to allow discontinuities:
sections = torch.tensor([[ 0, 0, 0, -1, 2, 3, 2, 3, 0, 3],
                         [-1, 0, 0, 1, 2, 1, 2, 1, -1, 1]])
# [[000_232303]
# [_0012121_1]]
Thank you for reading:)
I realize that scatter_add and gather perfectly solve the problem.
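For illustration, a minimal sketch of that idea (assuming the sections have already been converted into a per-position integer block id tensor segment_ids of shape (batch, seq), with padded positions given a block id of their own):
import torch

def blocky_softmax(logits, segment_ids):
    # logits: (batch, seq, feat); segment_ids: (batch, seq) long tensor of block ids
    exp_logits = logits.exp()
    n_blocks = int(segment_ids.max()) + 1
    idx = segment_ids.unsqueeze(-1).expand_as(exp_logits)              # (batch, seq, feat)
    block_sums = torch.zeros(logits.size(0), n_blocks, logits.size(2),
                             dtype=logits.dtype, device=logits.device)
    block_sums.scatter_add_(1, idx, exp_logits)                        # sum of exp per block
    denom = block_sums.gather(1, idx)                                  # read back each position's block sum
    return exp_logits / denom

logits = torch.rand(2, 9, 100)
segment_ids = torch.tensor([[0, 0, 0, 1, 2, 2, 2, 2, 3],
                            [0, 1, 1, 2, 2, 3, 3, 4, 4]])
result = blocky_softmax(logits, segment_ids)   # each block of result sums to 1 along dim 1
This avoids the loop over blocks entirely, since scatter_add_ accumulates all block denominators in one call.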

matplotlib shift pcolormesh plot to symmetrized coordinates

I have some 2D data with x and y coordinates both within [0,1], plotted using pcolormesh.
Now I want to symmetrize the plot to [-0.5, 0.5] for both x and y coordinates. In Matlab I was able to achieve this by changing x and y from e.g. [0, 0.2, 0.4, 0.6, 0.8] to [0, 0.2, 0.4, -0.4, -0.2], without rearranging the data. However, with pcolormesh I cannot get the desired result.
A minimum example is shown below, with data represented simply by x+y:
import matplotlib.pyplot as plt
import numpy as np
x,y = np.mgrid[0:1:5j,0:1:5j]
fig,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(9,3.3),constrained_layout=1)
# original plot spanning [0,1]
img1 = ax1.pcolormesh(x,y,x+y,shading='auto')
# shift x and y from [0,1] to [-0.5,0.5]
x = x*(x<0.5)+(x-1)*(x>0.5)
y = y*(y<0.5)+(y-1)*(y>0.5)
img2 = ax2.pcolormesh(x,y,x+y,shading='auto') # similar code works in Matlab
# for this specific case, the following is close to the desired result, I can just rename x and y tick labels
# to [-0.5,0.5], but in general data is not simply x+y
img3 = ax3.pcolormesh(x+y,shading='auto')
fig.colorbar(img1,ax=[ax1,ax2,ax3],orientation='horizontal')
The corresponding figure is below; any suggestion on what I have missed would be appreciated!
Let's look at what you want to achieve in a 1D example.
You have x values between 0 and 1 and a dummy function f(x) = 20*x to produce some values.
# x  = [0, .2, .4, .6, .8] -> [0, .2, .4, -.4, -.2] -> [-.4, -.2, 0, .2, .4]
# fx = [0,  4,  8, 12, 16] -> [0,  4,  8,  12,  16] -> [ 12,  16, 0,  4,  8]
#                             ^ only flip and shift x, not fx ^
You could use np.roll() to achieve the last operation.
I used n=14 to make the result better visible and show that this approach works for arbitrary n.
import numpy as np
import matplotlib.pyplot as plt
n = 14
x, y = np.meshgrid(np.linspace(0, 1, n, endpoint=False),
                   np.linspace(0, 1, n, endpoint=False))
z = x + y
x_sym = x*(x <= .5)+(x-1)*(x > .5)
# array([[ 0. , 0.2, 0.4, -0.4, -0.2], ...
x_sym = np.roll(x_sym, n//2, axis=(0, 1))
# array([[-0.4, -0.2, 0. , 0.2, 0.4], ...
y_sym = y*(y <= .5)+(y-1)*(y > .5)
y_sym = np.roll(y_sym, n//2, axis=(0, 1))
z_sym = np.roll(z, n//2, axis=(0, 1))
# array([[1.2, 1.4, 0.6, 0.8, 1. ],
# [1.4, 1.6, 0.8, 1. , 1.2],
# [0.6, 0.8, 0. , 0.2, 0.4],
# [0.8, 1. , 0.2, 0.4, 0.6],
# [1. , 1.2, 0.4, 0.6, 0.8]])
fig, (ax1, ax2) = plt.subplots(1, 2)
img1 = ax1.imshow(z, origin='lower', extent=(.0, 1., .0, 1.))
img2 = ax2.imshow(z_sym, origin='lower', extent=(-.5, .5, -.5, .5))
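The rolled arrays can also go straight back into pcolormesh when the shifted coordinates come out monotonic after the roll, as in the 5-point example from the comments above (a minimal sketch under that assumption):
n = 5
x, y = np.meshgrid(np.linspace(0, 1, n, endpoint=False),
                   np.linspace(0, 1, n, endpoint=False))
z = x + y
x_sym = np.roll(x*(x <= .5) + (x - 1)*(x > .5), n//2, axis=(0, 1))
y_sym = np.roll(y*(y <= .5) + (y - 1)*(y > .5), n//2, axis=(0, 1))
z_sym = np.roll(z, n//2, axis=(0, 1))

fig, ax = plt.subplots()
img = ax.pcolormesh(x_sym, y_sym, z_sym, shading='auto')
fig.colorbar(img, ax=ax)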

How to color the edges of a 3d graph in Mayavi?

I have a 3D graph created using Mayavi, and the edges have to be colored by a scalar value.
The following code creates the graph, but I am not sure how to color the edges:
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from mayavi import mlab
def main(edge_color=(0.8, 0.8, 0.8), edge_size=0.02):
    t = [1, 2, 4, 4, 5, 3, 5]
    h = [2, 3, 6, 5, 6, 4, 1]
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    nx.draw(G)
    plt.show()
    graph_pos = nx.spring_layout(G, dim=3)
    # numpy array of x,y,z positions in sorted node order
    xyz = np.array([graph_pos[v] for v in sorted(G)])
    mlab.figure(1)
    mlab.clf()
    pts = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2])
    pts.mlab_source.dataset.lines = np.array(G.edges())
    tube = mlab.pipeline.tube(pts, tube_radius=edge_size)
    mlab.pipeline.surface(tube, color=edge_color)
    mlab.show()  # interactive window
main()
Scalar values to be used for coloring the edges
scalar = [0.1, 0.7, 0.3, 0.5, 0.9, 0.8, 0.2]
Any suggestions on how to do this will be really helpful.
I also see another problem in the 3D graph that has been created: one of the edges is not connected to a node.
EDIT: From what I understand, mlab.pipeline.surface(tube, color=edge_color) is used to color the edge/tube.
Updated code:
from pprint import pprint

def main(edge_color=(0.8, 0.2, 0.8), edge_size=0.02, graph_colormap='winter'):
    t = [1, 2, 4, 4, 5, 3, 5]
    h = [2, 3, 6, 5, 6, 4, 1]
    ed_ls = [(x, y) for x, y in zip(t, h)]
    G = nx.OrderedGraph()
    G.add_edges_from(ed_ls)
    nx.draw(G)
    plt.show()
    scalars = np.array(G.nodes()) + 5
    pprint(scalars)
    e_color = [(0.8, 0.2, 0.8), (0.8, 0.2, 0.8), (0.8, 0.2, 0.8),
               (0.8, 0.2, 0.8), (0.8, 0.2, 0.8), (0.8, 0.2, 0.8),
               (0.8, 0.2, 0.8)]
    graph_pos = nx.spring_layout(G, dim=3)
    # numpy array of x,y,z positions in sorted node order
    xyz = np.array([graph_pos[v] for v in sorted(G)])
    mlab.figure(1)
    mlab.clf()
    pts = mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
                        scalars,
                        colormap=graph_colormap)
    pts.mlab_source.dataset.lines = np.array(G.edges())
    tube = mlab.pipeline.tube(pts, tube_radius=edge_size)
    # mlab.pipeline.surface(tube, color=e_color)  # doesn't work
    mlab.pipeline.surface(tube, color=edge_color)  # doesn't work
    mlab.show()  # interactive window
But the problem is that I am not able to assign a different color to each edge/tube.
A possible solution, not at all automated, but sufficient for a proof of concept.
import networkx as nx
import numpy as np
from mayavi import mlab
t = [1, 2, 4, 4, 5, 3, 5]
h = [2, 3, 6, 5, 6, 4, 1]
ed_ls = [(x, y) for x, y in zip(t, h)]
G = nx.OrderedGraph()
G.add_edges_from(ed_ls)
graph_pos = nx.spring_layout(G, dim=3)
xyz = np.array([graph_pos[v] for v in G])
print(xyz.shape)
mlab.points3d(xyz[:, 0], xyz[:, 1], xyz[:, 2],
              np.linspace(1, 2, xyz.shape[0]),
              colormap='winter', resolution=100, scale_factor=0.3)
smallTri = np.tile(xyz[-3:, :], (2, 1))[:4, :]
remEdges = np.vstack((xyz[-1, :], xyz[:-2, :]))
allEdges = np.vstack((smallTri, remEdges))
for i in range(allEdges.shape[0] - 1):
    mlab.plot3d(allEdges[i:i + 2, 0], allEdges[i:i + 2, 1],
                allEdges[i:i + 2, 2], color=(0.2, 1 - 0.1 * i, 0.8))
mlab.show()
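To drive the segment colors from scalar values rather than a hard-coded tuple, each value could be passed through a matplotlib colormap (a minimal sketch; seg_vals is a hypothetical array with one value in [0, 1] per segment of allEdges, which would be filled from the question's scalar list once the segment-to-edge order is sorted out):
import matplotlib.cm as cm

cmap = cm.get_cmap('winter')
n_seg = allEdges.shape[0] - 1
seg_vals = np.linspace(0, 1, n_seg)       # hypothetical per-segment scalars in [0, 1]
for i in range(n_seg):
    r, g, b, _ = cmap(seg_vals[i])        # look up an RGB color for this segment's scalar
    mlab.plot3d(allEdges[i:i + 2, 0], allEdges[i:i + 2, 1],
                allEdges[i:i + 2, 2], color=(r, g, b))
mlab.show()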

Tensorflow histogram with custom bins

I have two tensors - one with bin specification and the other one with observed values. I'd like to count how many values are in each bin.
I know how to do this in either NumPy or bare Python, but I need to do this in pure TensorFlow. Is there a more sophisticated version of tf.histogram_fixed_width with an argument for bin specification?
Example:
# Input - 3 bins and 2 observed values
bin_spec = [0, 0.5, 1, 2]
values = [0.1, 1.1]
# Histogram
[1, 0, 1]
This seems to work, although I consider it to be quite memory- and time-consuming.
import tensorflow as tf
bins = [-1000, 1, 3, 10000]
vals = [-3, 0, 2, 4, 5, 10, 12]
vals = tf.constant(vals, dtype=tf.float64, name="values")
bins = tf.constant(bins, dtype=tf.float64, name="bins")
resh_bins = tf.reshape(bins, shape=(-1, 1), name="bins-reshaped")
resh_vals = tf.reshape(vals, shape=(1, -1), name="values-reshaped")
left_bin = tf.less_equal(resh_bins, resh_vals, name="left-edge")
right_bin = tf.greater(resh_bins, resh_vals, name="right-edge")
resu = tf.logical_and(left_bin[:-1, :], right_bin[1:, :], name="bool-bins")
counts = tf.reduce_sum(tf.to_float(resu), axis=1, name="count-in-bins")
with tf.Session() as sess:
    print(sess.run(counts))
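A less memory-hungry alternative (a sketch, assuming a reasonably recent TensorFlow version where tf.searchsorted and tf.math.bincount are available) is to compute each value's bin index directly and count the indices:
import tensorflow as tf

bins = tf.constant([-1000.0, 1.0, 3.0, 10000.0])
vals = tf.constant([-3.0, 0.0, 2.0, 4.0, 5.0, 10.0, 12.0])

# index of the bin each value falls into, i.e. bins[i] <= v < bins[i+1]
bin_idx = tf.searchsorted(bins, vals, side='right') - 1
counts = tf.math.bincount(bin_idx, minlength=3)   # 3 = len(bins) - 1

with tf.Session() as sess:
    print(sess.run(counts))   # -> [2 1 4]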
