How do I find the best transformation matrix for aligning two 2D point sets so that the mean squared error is minimized? This is what I have done so far, but tform * src is not right:
import numpy as np
from skimage import transform as tf
from sklearn.metrics import mean_squared_error
# estimate transformation parameters
src = np.array([0, 0, 10, 10]).reshape((2, 2))
dst = np.array([12, 14, 1, -20]).reshape((2, 2))
tform = tf.estimate_transform('similarity', src, dst)
print(src)
print(dst)
print(tform.params)
msq = mean_squared_error(tform * src, dst)  # wrong: this does not apply the transform to src
Finally, I was able to find the right answer to my question:
import numpy as np
from skimage import transform as tf
from sklearn.metrics import mean_squared_error
# estimate transformation parameters
src = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
dst = np.array([[3, 1], [3, 2], [2, 2], [2, 1]])
tform = tf.estimate_transform('similarity', src, dst)
#tform is the transformation matrix for these data to align them
print(src)
print(dst)
print(tform.params)
mt = tf.matrix_transform(src, tform.params)  # mt is the same as dst
mean_squared_error(mt, dst)  # should be zero
print('{:.10f}'.format(mean_squared_error(mt, dst)))
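As an aside (a minimal sketch of mine, not part of the original answer): skimage transform objects are callable, so the estimated transform can also be applied directly, without going through tform.params:
mt2 = tform(src)  # equivalent to tf.matrix_transform(src, tform.params)
print('{:.10f}'.format(mean_squared_error(mt2, dst)))  # should also print 0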
I am trying to estimate a normal density using a quadratic approximation in TensorFlow (code 4.14 from McElreath's Statistical Rethinking).
The code I have so far is:
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
_BASE_URL = "https://raw.githubusercontent.com/rmcelreath/rethinking/Experimental/data"
HOWELL_DATASET_PATH = f"{_BASE_URL}/Howell1.csv"
df = pd.read_csv(HOWELL_DATASET_PATH, sep=';')
df = df[df['age'] >= 18]
mu = tf.linspace(start=140.0, stop=160.0, num=200)
sigma= tf.linspace(start=4.0, stop=9.0, num=200)
tf.reduce_sum(tfd.Normal(loc=mu, scale=sigma).log_prob(df.height))
This fails because df.height has shape (352,), whilst I am creating (200,) points for my normal distribution to be evaluated on.
However
tf.reduce_sum(tfd.Normal(loc=mu, scale=sigma).log_prob(2))
and
tf.reduce_sum(tfd.Normal(loc=mu[0], scale=sigma[0]).log_prob(df.height))
both work.
I need to create a (200, 352) tensor: one Normal for each (mu, sigma) pair on my grid, evaluated against my sample data, df. How do I do this?
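A minimal sketch of the shaping involved (my own illustration, assuming standard TFP batch broadcasting; the answers below give fuller treatments):
heights = tf.cast(df.height.values, tf.float32)  # shape (352,)
# a trailing axis gives the Normal a (200, 1) batch shape, which
# broadcasts against the (352,) sample to produce a (200, 352) result
log_probs = tfd.Normal(loc=mu[:, tf.newaxis], scale=sigma[:, tf.newaxis]).log_prob(heights)
log_lik = tf.reduce_sum(log_probs, axis=1)  # one log-likelihood per (mu, sigma) grid point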
I think TFP's joint distribution is a nice way to express this:
mu = tf.linspace(start=140.0, stop=160.0, num=200)
sigma = tf.linspace(start=7.0, stop=9.0, num=200)
def mk_joint(nobs):
    return tfd.JointDistributionNamed(dict(
        mu=tfd.Normal(178, 20),
        sigma=tfd.Uniform(0, 50),
        height=lambda mu, sigma: tfd.Sample(tfd.Normal(loc=mu, scale=sigma), nobs)
    ))
joint = mk_joint(len(df))
joint.sample()
print(f'joint event shape: {joint.event_shape}')
lp = joint.log_prob(dict(mu=mu[:,tf.newaxis], sigma=sigma, height=df.height))
import matplotlib.pyplot as plt
plt.imshow(lp)
plt.xlabel('sigma')
plt.xticks(np.arange(len(sigma))[::10], sigma[::10].numpy().round(2), rotation=90)
plt.ylabel('mu')
plt.yticks(np.arange(len(mu))[::10], mu[::10].numpy().round(2))
plt.show()
=>
joint event shape: {'sigma': TensorShape([]), 'mu': TensorShape([]), 'height': TensorShape([352])}
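From here, one hedged follow-up (my addition, not shown in the original answer) is to normalize the lp grid into a discrete posterior over the (mu, sigma) grid:
# exponentiate stably; the result sums to 1 over the whole grid
posterior = tf.exp(lp - tf.reduce_logsumexp(lp))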
So, I figured out that one way to do it would be to create a (200, 200, 352) grid and then reshape, and the rest of the calculations follow straightforwardly.
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
_BASE_URL = "https://raw.githubusercontent.com/rmcelreath/rethinking/Experimental/data"
HOWELL_DATASET_PATH = f"{_BASE_URL}/Howell1.csv"
df = pd.read_csv(HOWELL_DATASET_PATH, sep=';')
df = df[df['age'] >= 18]
mu = tf.linspace(start=140.0, stop=160.0, num=200)
sigma = tf.linspace(start=7.0, stop=9.0, num=200)
# build a full (200, 200, 352) grid; the third axis is a dummy for the 352 observations
means, stddevs, _ = tf.meshgrid(mu, sigma, np.zeros((352,)).astype(np.float32))
means = tf.reshape(means, [40000, 352])
stddevs = tf.reshape(stddevs, [40000, 352])
# note: tfd.Normal's scale parameter is a standard deviation, not a variance
normal = tfd.Normal(loc=means, scale=stddevs)
log_lik = tf.reduce_sum(normal.log_prob(df.height), axis=1)
logprob_mu = tfd.Normal(178.0, 20.0).log_prob(means)
logprob_sigma = tfd.Uniform(low=0.0, high=50.0).log_prob(stddevs)
# each row repeats one (mu, sigma) pair, so column 0 suffices for the priors
log_joint_prod = log_lik + logprob_mu[:, 0] + logprob_sigma[:, 0]
# subtract the max before exponentiating for numerical stability
joint_prob_tf = tf.exp(log_joint_prod - tf.reduce_max(log_joint_prod))
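A hedged sketch of how this result could be visualized (my addition; it relies on tf.meshgrid's default 'xy' indexing, which puts sigma along the rows and mu along the columns once the flat vector is reshaped):
import matplotlib.pyplot as plt
# reshape the flat (40000,) posterior back onto the (sigma, mu) grid
posterior_grid = tf.reshape(joint_prob_tf, [200, 200]).numpy()
plt.contourf(mu, sigma, posterior_grid)
plt.xlabel('mu')
plt.ylabel('sigma')
plt.show()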
I am trying to plot a netCDF4 file containing ocean currents from a NASA database for a project, but I keep getting errors such as "x and y coordinates are not compatible with the shape of the vector components".
I have tried changing the streamplot to a contourf (when I did, it said that it needed a 2D array), which I tried to fix but could not get to work.
import os
import matplotlib.pyplot as plt
from netCDF4 import Dataset as netcdf_dataset
import numpy as np
import cartopy.crs as ccrs
fname = "oscar_vel2019.nc.gz.nc"
data = netcdf_dataset(fname)
v = data.variables['v'][0, :, :, :]
vf = data.variables['vm'][0, :, :, :]
u = data.variables['u'][0, :, :, :]
uf = data.variables['um'][0, :, :, :]
lats = data.variables['latitude'][:]
lons = data.variables['longitude'][:]
ax = plt.axes(projection=ccrs.PlateCarree())
mymap = plt.streamplot(lons, lats, u, v, 60, transform=ccrs.PlateCarree())
ax.coastlines()
plt.show()
I would like the ocean currents to be visible on the plot, and to show the movement of particles in the currents through an animation. I don't have much experience with this, which is why I am asking. Here is the link from which I got the file: https://podaac-opendap.jpl.nasa.gov/opendap/hyrax/allData/oscar/preview/L4/oscar_third_deg/oscar_vel2019.nc.gz.html
OK, I downloaded the data. The problem is that u and v are 4-dimensional, so you need to squeeze out the "depth" dimension. Cartopy also doesn't accept longitudes greater than 180, and you probably won't get away with stream plotting the whole thing. Also, density=60 will take forever...
This is ugly, but gives you the idea.
import xarray as xr
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
with xr.open_dataset('/Users/jklymak/downloads/oscar_vel2019.nc.gz.nc') as ds:
    print(ds)
    ax = plt.axes(projection=ccrs.PlateCarree())
    dec = 10  # decimation factor
    lon = ds.longitude.values[::dec]
    lon[lon > 180] = lon[lon > 180] - 360  # cartopy wants longitudes in [-180, 180]
    mymap = plt.streamplot(lon, ds.latitude.values[::dec],
                           ds.u.values[0, 0, ::dec, ::dec],
                           ds.v.values[0, 0, ::dec, ::dec],
                           6, transform=ccrs.PlateCarree())
    ax.coastlines()
    plt.show()
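As a rough alternative sketch (my suggestion, not part of the answer): a decimated quiver plot of the same arrays is usually much cheaper than a stream plot, and is a natural starting point for the particle animation the question mentions. It assumes the same lon, dec, and ds as above, inside the same with block:
    # same decimated arrays as above, drawn as arrows instead of streamlines
    ax2 = plt.axes(projection=ccrs.PlateCarree())
    ax2.quiver(lon, ds.latitude.values[::dec],
               ds.u.values[0, 0, ::dec, ::dec],
               ds.v.values[0, 0, ::dec, ::dec],
               transform=ccrs.PlateCarree())
    ax2.coastlines()
    plt.show()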
I have a histogram, and I'm trying to fit the best normal (Gaussian) function to it, as you can see below. The problem is that the Gaussian fit isn't as good a fit as I expected.
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling import models, fitting

bins = np.arange(-1, 8, 0.3)

# Reading data
a18 = np.loadtxt('AndXII18I.srt')
arr18 = np.array(a18[:, 11])

fig, axs = plt.subplots(2, 2)
y18 = axs[0, 0].hist(arr18, bins, histtype='step')  # y18[0] holds the bin counts
axs[0, 0].set_xlim([np.min(arr18), np.max(arr18)])

x = np.linspace(-1, bins[len(bins) - 2], len(bins) - 1)  # bin positions for the fit
x1 = np.linspace(-1, 8, 1000)  # fine grid for plotting the fitted curve

# guesses for the parameters:
g_init = models.Gaussian1D(1, 0, 1.)
fit_g = fitting.LevMarLSQFitter()
g18 = fit_g(g_init, x, y18[0])

a18 = g18.mean
t18 = g18.amplitude * np.exp(-(x1 - a18)**2 / (2 * g18.stddev**2))
axs[0, 0].plot(x1, t18)
plt.show()
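As a hedged aside (not in the original question): astropy fitted models are callable, so the fitted curve can be evaluated directly instead of rewriting the Gaussian by hand:
t18 = g18(x1)  # evaluate the fitted Gaussian1D on the fine grid
axs[0, 0].plot(x1, t18)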
I am trying to create a stereographic plot using Basemap offset from the north pole, but the west-east directions are apparently reversed. Is this an error in my implementation, or a bug?
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
m = Basemap(projection='stere',
            lat_0=90, lon_0=270, lat_ts=(90.+35.)/2.,
            llcrnrlon=150, urcrnrlon=-60, llcrnrlat=50, urcrnrlat=50)
m.drawmeridians(np.arange(0,360,30),labels=[1,1,1,0])
m.drawparallels(np.arange(-90,90,5))
m.drawcoastlines()
m.shadedrelief()
plt.show()
Here is the result:
[image: result from script]
How might I reproduce the following map, which is offset-centred and rotated?
[image: restricted map]
Using an azimuthal map projection always requires a proper set of parameters to get a good result. In this case, a stereographic projection centered on the north pole, the proper parameters are not the ones you would usually use with the more common PlateCarree projection. Here is working code that you may try.
# Stereographic projection coverage
# should be specified less than half of a hemisphere
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
m = Basemap(projection='stere', resolution='c',
            lat_0=90, lon_0=270, lat_ts=(90.+35.)/2.,
            width=15000000, height=10000000)
# (width, height) is the plot extents in meters
m.drawmeridians(np.arange(0, 360, 30), labels=[1,1,1,0])
m.drawparallels(np.arange(0, 90, 10), labels=[0,0,0,1])
m.drawcoastlines()
m.shadedrelief()
plt.show()
The resulting plot (map 1):
Other parts of the world can be brought into the plotting area by recentering the map.
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
# projection center point
lon0 = 180
lat0 = 60
m = Basemap(projection='stere', resolution='c',
            lat_0=lat0, lon_0=lon0, lat_ts=lat0,
            width=15000000, height=10000000)
m.drawmeridians(np.arange(0, 360, 30), labels=[1,0,0,1]) # left, right, top, bottom
m.drawparallels(np.arange(0, 90, 10), labels=[0,1,1,0])
m.drawcoastlines()
m.shadedrelief()
plt.show()
The output plot (map 2):
By specifying proper values of llcrnrlon, urcrnrlon, llcrnrlat, and urcrnrlat in Basemap(), one can get the required map extents. Here is another example, producing the plot the OP requested.
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(8,8))
m = Basemap(projection='stere', resolution='c',
            lat_0=90, lon_0=-90, lat_ts=(90.+35.)/2.,
            llcrnrlon=-142, urcrnrlon=78, llcrnrlat=19, urcrnrlat=45)
m.drawmeridians(np.arange(0, 360, 30), labels=[1,0,1,0]) # left, right, top, bottom
m.drawparallels(np.arange(0, 90, 10), labels=[0,1,0,1])
m.drawcoastlines()
m.shadedrelief()
plt.show()
The resulting plot (map 3):
I would like to generate a skeleton out of an image. Since the edges generated from the original image using skimage aren't smooth, the resulting skeleton obtained from the binary image has disconnected edges with knots.
import numpy as np
import cv2
import matplotlib.pyplot as plt
import skimage
from skimage import filters, feature
from skimage.filters import threshold_mean
from skimage.morphology import skeletonize_3d

imgfile = "edit.jpg"
im = cv2.imread(imgfile)  # read the image before converting it
image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
thresh = threshold_mean(image)
binary = image > thresh
edges = filters.sobel(binary)
dilate = feature.canny(binary, sigma=0)  # despite the name, this is an edge map
skeleton = skeletonize_3d(binary)
fig, axes = plt.subplots(nrows=2,ncols=2, figsize=(8, 2))
ax = axes.ravel()
ax[0].imshow(binary, cmap=plt.cm.gray)
ax[0].set_title('binarize')
ax[1].imshow(edges, cmap=plt.cm.gray)
ax[1].set_title('edges')
ax[2].imshow(dilate, cmap=plt.cm.gray)
ax[2].set_title('dilates')
ax[3].imshow(skeleton, cmap=plt.cm.gray)
ax[3].set_title('skeleton')
for a in ax:
    a.axis('off')
plt.show()
I tried using dilation to smooth the jagged edges, but the contours in the skeleton then have two edges instead of the single desired edge.
I would like to ask for suggestions on how the edges can be smoothed to avoid knots and disconnected edges in the resulting skeleton.
[image: input image]
[image: output images]
Edit: after using Gaussian smoothing:
binary = image > thresh
gaussian = skimage.filters.gaussian(binary)
skeleton = skeletonize_3d(gaussian)
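(One note on this edit, as an assumption to verify: skimage.filters.gaussian returns a float image, so re-thresholding before skeletonizing keeps the input to skeletonize_3d binary; the 0.5 cutoff below is a tunable guess.)
gaussian = skimage.filters.gaussian(binary.astype(float))
skeleton = skeletonize_3d(gaussian > 0.5)  # 0.5 is an assumed, tunable cutoff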
This median filter should do the job on your binary image before skeletonization.
import scipy.signal
binary_smoothed = scipy.signal.medfilt(binary, 3)
For the borders, I would probably use the following and play with the parameters, as shown in this link: https://claudiovz.github.io/scipy-lecture-notes-ES/advanced/image_processing/auto_examples/plot_canny.html
from image_source_canny import canny
borders = canny(binary_smoothed, 3, 0.3, 0.2)
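Alternatively (a sketch of my own, not from the answer), scikit-image ships a built-in Canny detector, which avoids the external image_source_canny module:
from skimage import feature
# sigma plays the role of the smoothing width; the thresholds are left at their defaults
borders = feature.canny(binary_smoothed.astype(float), sigma=3)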