I am trying to generate a list of values and then plot that list using Matplotlib's hist function. This is what my graph looks like: https://i.stack.imgur.com/u7P8Q.png
I've followed this process for two other graphs with no trouble at all, but for some reason this one is giving me trouble.
This is my code:
for y in range(Nloop):
    e0_y = e_List[y]  # take the y-th value from e_List
    a0_y = a_List[y]  # same for a_List
    Term_1 = (a0_y ** 4) / ((m_tot ** 3) * eta)
    Term_2 = (1 - e0_y ** 2) ** (7 / 2)
    Unit_Conversions = (c ** 5 / G ** 3)
    tau = (3 / 85) * Term_1 * Term_2 * Unit_Conversions
    T_List[y] = tau

MIN, MAX = 1, 1e12
Nbins = 25
bins = 10 ** np.linspace(np.log10(MIN), np.log10(MAX), Nbins)

plt.hist(T_List, bins=25, histtype='step')
plt.xscale('log')  # x-axis now has a log scale
plt.yscale('log')  # y-axis too
plt.title('Distribution')
plt.xlabel('T [sec]')
plt.ylabel('Frequency')
plt.show()
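Not part of the original question, but worth noting: the snippet above builds log-spaced bin edges in bins and then passes bins=25 to plt.hist, so the computed edges are never used. A minimal self-contained sketch of the log-spaced edges actually being applied (the lognormal sample below is only synthetic stand-in data for T_List):
import numpy as np
import matplotlib.pyplot as plt

# Synthetic stand-in for T_List, spanning several decades
T_List = np.random.lognormal(mean=12, sigma=4, size=1000)

MIN, MAX = 1, 1e12
Nbins = 25
bins = 10 ** np.linspace(np.log10(MIN), np.log10(MAX), Nbins)  # log-spaced edges

plt.hist(T_List, bins=bins, histtype='step')  # pass the edge array, not a bin count
plt.xscale('log')
plt.yscale('log')
plt.title('Distribution')
plt.xlabel('T [sec]')
plt.ylabel('Frequency')
plt.show()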
I am visualizing four classes of spectrograms. For a single class, my spectrogram output looks like this (consider this as one image). The code that produces it is:
# imports required by this snippet
import numpy as np
import scipy
import scipy.fftpack
from scipy.io import wavfile
import matplotlib.pyplot as plt

def spec(filename):
    time_period = 0.5  # FFT time period (in seconds). Can comfortably process time frames from 0.05 seconds to 10 seconds
    # ==============================================
    fs_rate, signal_original = wavfile.read(filename)
    total_time = int(np.floor(len(signal_original) / fs_rate))
    sample_range = np.arange(0, total_time, time_period)
    total_samples = len(sample_range)
    print("Frequency sampling", fs_rate)
    print("total time: ", total_time)
    print("sample time period: ", time_period)
    print("total samples: ", total_samples)
    output_array = []
    for i in sample_range:
        # print("Processing: %d / %d (%d%%)" % (i/time_period + 1, total_samples, (i/time_period + 1)*100/total_samples))
        sample_start = int(i * fs_rate)
        sample_end = int((i + time_period) * fs_rate)
        signal = signal_original[sample_start:sample_end]
        l_audio = len(signal.shape)
        # print("Channels", l_audio)
        if l_audio == 2:
            signal = signal.sum(axis=1) / 2
        N = signal.shape[0]
        # print("Complete Samplings N", N)
        secs = N / float(fs_rate)
        # print("secs", secs)
        Ts = 1.0 / fs_rate  # sampling interval in time
        # print("Timestep between samples Ts", Ts)
        t = scipy.arange(0, secs, Ts)  # time vector (scipy.arange and scipy.fft below assume an older SciPy)
        FFT = abs(scipy.fft(signal))
        FFT_side = FFT[range(int(N / 2))]  # one-sided FFT range
        freqs = scipy.fftpack.fftfreq(signal.size, t[1] - t[0])
        fft_freqs = np.array(freqs)
        freqs_side = freqs[range(int(N / 2))]  # one-sided frequency range
        fft_freqs_side = np.array(freqs_side)
        # Reduce to 0-5000 Hz
        bucket_size = 5
        buckets = 16
        FFT_side = FFT_side[0:bucket_size * buckets]
        fft_freqs_side = fft_freqs_side[0:bucket_size * buckets]
        # Combine frequencies into buckets
        FFT_side = np.array([int(sum(FFT_side[current: current + bucket_size])) for current in range(0, len(FFT_side), bucket_size)])
        fft_freqs_side = np.array([int(sum(fft_freqs_side[current: current + bucket_size])) for current in range(0, len(fft_freqs_side), bucket_size)])
        # FFT_side: normalize (0-1)
        max_value = max(FFT_side)
        if max_value != 0:
            FFT_side_norm = FFT_side / max_value
        # Append to output array
        output_array.append(FFT_side_norm)
    # ============================================
    # Plotting
    plt.figure(figsize=(4, 7))
    plt.subplot(411)
    plt.subplots_adjust(hspace=0.5)
    plt.plot(t, signal, "g")  # plotting the signal
    plt.xlabel('Time')
    plt.ylabel('Amplitude')
    plt.subplot(412)
    diff = np.diff(fft_freqs_side)
    widths = np.hstack([diff, diff[-1]])
    plt.bar(fft_freqs_side, abs(FFT_side_norm), width=widths)  # plotting the positive fft spectrum
    plt.xticks(fft_freqs_side, fft_freqs_side, rotation='vertical')
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Count single-sided')
    FFT_side_norm_line = FFT_side_norm.copy()
    FFT_side_norm_line.resize((1, buckets))
    plt.subplot(413)
    plt.imshow(FFT_side_norm_line)
    plt.xlabel('Image Representation 1D')
    plt.show()
    print("\n\n\n\n\n\n")
How can I combine four plots like this and make a single output image? Something like this:
I'd suggest using fig.subfigures and plt.subplot_mosaic.
The plot above was obtained using this simple script:
import matplotlib.pyplot as plt
fig = plt.figure(figsize = (8, 10), layout='constrained')
# the next two lines do the trick
sfigs = fig.subfigures(2,2)
mosaics = [f.subplot_mosaic('t;t;t;f;f;f;i;.') for f in sfigs.flat]
# next, how to reference the subplots inside the subfigures
mosaics[0]['t'].plot(range(5), color='b')
mosaics[1]['t'].plot(range(5), color='k')
mosaics[2]['t'].plot(range(5), color='r')
mosaics[3]['t'].plot(range(5), color='g')
mosaics[0]['f'].plot(range(3), color='b')
mosaics[1]['f'].plot(range(3), color='k')
mosaics[2]['f'].plot(range(3), color='r')
mosaics[3]['f'].plot(range(3), color='g')
mosaics[0]['i'].imshow([range(10)]*2)
plt.show()
You can do it this way:
import numpy as np
import matplotlib.pyplot as plt

# example data (x and y are not defined in the snippet as posted)
x = np.linspace(0, 2 * np.pi, 400)
y = np.sin(x ** 2)

fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(x, y)
axs[0, 0].set_title('Axis [0, 0]')
axs[0, 1].plot(x, y, 'tab:orange')
axs[0, 1].set_title('Axis [0, 1]')
axs[1, 0].plot(x, -y, 'tab:green')
axs[1, 0].set_title('Axis [1, 0]')
axs[1, 1].plot(x, -y, 'tab:red')
axs[1, 1].set_title('Axis [1, 1]')

for ax in axs.flat:
    ax.set(xlabel='x-label', ylabel='y-label')

# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()
The result will be like this:
Taken from https://matplotlib.org/stable/gallery/subplots_axes_and_figures/subplots_demo.html
I have a dataset from Kaggle with 45,253 rows and a single column of temperatures in Kelvin for the city of Detroit. Its mean = 282.97, std = 11, min = 243.48, max = 308.05.
This is the result when plotted as a histogram of 100 bins with density=True:
I am expected to write the following two approximation functions and see which one matches the histogram most closely, like this one here using scipy.stats.norm.pdf:
I generated the above image using:
x = np.linspace(dataset.Detroit.min(), dataset.Detroit.max(), 1001)
P_norm = norm.pdf(x, dataset.Detroit.mean(), dataset.Detroit.std())
plot_pdf_single(x, P_norm)
However, whenever I try to implement either of the two approximation functions, all of my values for P_norm come out as 0 or inf.
This is what I tried:
P_norm = [(1.0/(np.sqrt(2.0*pi*(std*std))))*np.exp(((-x_i-mu)*(-x_i-mu))/(2.0*(std*std))) for x_i in x]
I also broke it down into parts for a single x_i:
part1 = ((-x[0] - mu)*(-x[0] - mu)) / (2.0*(std * std))
part2 = np.exp(part1)
part3 = 1.0 / (np.sqrt(2.0 * pi * (std*std)))
total = part3*part2
I got the following values:
part1 = 1145.3913234604413
part2 = inf
part3 = 0.036267480036493875
total = inf
Both of the approximations use the same underlying formula:
def pdf_approximation(x_i, mu, std):
return (1.0 / (np.sqrt(2.0 * pi * (std*std)))) * np.exp((-(x_i-mu)*(x_i-mu)) / (2.0 * (std*std)))
The code for the first approximation is:
mu = 283
std = 11
P_norm = np.array([pdf_approximation(x_i, mu, std) for x_i in x])
plot_pdf_single(x, P_norm)
The code for the second approximation is:
mu1 = 276
std1 = 6
mu2 = 293
std2 = 6.5
P_norm = np.array([(pdf_approximation(x_i, mu1, std1) * 0.5) + (pdf_approximation(x_i, mu2, std2) * 0.5) for x_i in x])
plot_pdf_single(x, P_norm)
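Not part of the original post, but the inf values in the first attempt come from the sign placement: with np.exp(((-x_i-mu)*(-x_i-mu))/(2.0*(std*std))) the exponent is (x_i + mu)^2 / (2*std^2), which is about 1145 for the first x value (matching part1 above), and exp(1145) overflows to inf. The correct exponent is -(x_i - mu)^2 / (2*std^2), as in pdf_approximation. A minimal vectorized sketch, reusing mu = 283 and std = 11 from above and cross-checking against scipy.stats.norm.pdf:
import numpy as np
from scipy.stats import norm

mu, std = 283, 11
x = np.linspace(243.48, 308.05, 1001)  # same range as the Detroit data

# Note the minus sign applies to the whole (x - mu)**2 term
P_manual = (1.0 / np.sqrt(2.0 * np.pi * std**2)) * np.exp(-(x - mu)**2 / (2.0 * std**2))

# Cross-check against scipy's implementation
P_scipy = norm.pdf(x, mu, std)
print(np.allclose(P_manual, P_scipy))  # expected: True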
I have two variables, 'Root zone' (RZS) and 'Tree cover' (TC), both geolocated NetCDF grids in which each cell holds a specific value. The TC values vary from 0 to 100. Each grid cell is 0.25 degrees across (which might be helpful in understanding the distances).
My problem is: I want to calculate, for each TC value in the ranges 70-100 and 30-70 (i.e. every TC value greater than 30, at each lat and lon), the distance to the nearest point where TC is in the range 0-30 (less than 30).
What I want to do is create a 2-dimensional scatter plot with the X-axis denoting the distance in km of the 70-100 TC (and 30-70 TC) points from the 0-30 values, and the Y-axis denoting the RZS of those 70-100 TC (and 30-70 TC) points.
#I read the files using xarray
deficit_annual = xr.open_dataset('Rootzone_CHIRPS_era5_2000-2015_annual_SA_masked.nc')
tc = xr.open_dataset('Treecover_MODIS_2000-2015_annual_SA_masked.nc')
fig, ax = plt.subplots(figsize = (8,8))
## year I am interested in
year = 2000
i = year - 2000
# Select the indices of the low- and high-valued points
# This will result in warnings here because of NaNs;
# the NaNs should be filtered out in the indices, since they will
# compare to False in all the comparisons, and thus not be
# indexed by 'low' and 'high'
low = (tc[i,:,:] <= 30) # Savanna
moderate = (tc[i,:,:] > 30) & (tc[i,:,:] < 70) #Transitional forest
high = (tc[i,:,:] >= 70) #Forest
# Get the coordinates for the low- and high-valued points,
# combine and transpose them to be in the correct format
y, x = np.where(low)
low_coords = np.array([x, y]).T
y, x = np.where(high)
high_coords = np.array([x, y]).T
y, x = np.where(moderate)
moderate_coords = np.array([x, y]).T
# We now calculate the distances between *all* low-valued points, and *all* high-valued points.
# This calculation scales as O^2, as does the memory cost (of the output),
# so be wary when using it with large input sizes.
from scipy.spatial.distance import cdist, pdist
distances = cdist(low_coords, moderate_coords, 'euclidean')
# Now find the minimum distance along the axis of the high-valued coords,
# which here is the second axis.
# Since we also want to find values corresponding to those minimum distances,
# we should use the `argmin` function instead of a normal `min` function.
indices = distances.argmin(axis=1)
mindistances = distances[np.arange(distances.shape[0]), indices]
minrzs = np.array(deficit_annual[i,:,:]).flatten()[indices]
plt.scatter(mindistances*25, minrzs, s = 60, alpha = 0.5, color = 'goldenrod', label = 'Transitional Forest')
distances = cdist(low_coords, high_coords, 'euclidean')
# Now find the minimum distance along the axis of the high-valued coords,
# which here is the second axis.
# Since we also want to find values corresponding to those minimum distances,
# we should use the `argmin` function instead of a normal `min` function.
indices = distances.argmin(axis=1)
mindistances = distances[np.arange(distances.shape[0]), indices]
minrzs = np.array(deficit_annual[i,:,:]).flatten()[indices]
plt.scatter(mindistances*25, minrzs, s = 60, alpha = 1, color = 'green', label = 'Forest')
plt.xlabel('Distance from Savanna (km)', fontsize = '14')
plt.xticks(fontsize = '14')
plt.yticks(fontsize = '14')
plt.ylabel('Rootzone storage capacity (mm/year)', fontsize = '14')
plt.legend(fontsize = '14')
#plt.ylim((-10, 1100))
#plt.xlim((0, 30))
What I want to know is whether the code has an error: it works as it is now, but it stops working when I increase the threshold in 'high = (tc[i,:,:] >= 70)' to 80 for the year 2000, which makes me wonder whether the code is correct.
Secondly, is it possible to define a 20 km buffer region for 'low = (tc[i,:,:] <= 30)'? What I mean is that 'low' should be defined only where a cluster of tree-cover values is below 30, not by an individual pixel.
Some netCDF files are attached in the link below:
https://www.dropbox.com/sh/unm96q7sfto8y53/AAA7e12bs07XtpMiVFdML_PIa?dl=0
The graph I want is something like this (derived from the code above).
Thank you for your help.
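Not part of the original question, but a rough sketch of one way to express the clustering idea in the second question above: apply a morphological opening to the 'low' mask so that only contiguous groups of low-tree-cover cells survive. The 3x3 structuring element (standing in for a roughly 20 km neighbourhood, given ~25 km cells) and the name tc_slice for the 2-D tree-cover array of one year are illustrative assumptions:
import numpy as np
from scipy import ndimage

low = np.asarray(tc_slice <= 30)             # boolean mask of low tree cover (NaNs compare to False)
structure = np.ones((3, 3), dtype=bool)      # neighbourhood used to define a "cluster"
low_clustered = ndimage.binary_opening(low, structure=structure)

# low_clustered can then replace 'low' when building low_coords with np.where.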
I am trying to take an average over the oscillations of some highly oscillating data. The oscillations are not uniform; there are fewer oscillations in the initial region.
x = np.linspace(0, 1000, 1000001)
y = np.sin(x**2)  # some oscillating data, say sin(x^2)
(The original data file is huge, so I can't upload it.)
I want to take a weighted moving average of the function and plot it. Initially the period of the function is larger, so I want to average over a larger time interval, while I can use a smaller time interval later.
I found a possibly elegant solution in the following post:
Weighted moving average in python
However, I want to use a different width in different regions of x: say, when x is between (0, 100) I want width=0.6, while when x is between (101, 300) I want width=0.2, and so on.
This is what I have tried to implement (with my limited knowledge of programming!):
def weighted_moving_average(x, y, step_size=0.05):  # change the width to control the average
    bin_centers = np.arange(np.min(x), np.max(x) - 0.5*step_size, step_size) + 0.5*step_size
    bin_avg = np.zeros(len(bin_centers))

    # We're going to weight with a Gaussian function
    def gaussian(x, amp=1, mean=0, sigma=1):
        return amp*np.exp(-(x - mean)**2/(2*sigma**2))

    if x.any() < 100:
        for index in range(0, len(bin_centers)):
            bin_center = bin_centers[index]
            weights = gaussian(x, mean=bin_center, sigma=0.6)
            bin_avg[index] = np.average(y, weights=weights)
    else:
        for index in range(0, len(bin_centers)):
            bin_center = bin_centers[index]
            weights = gaussian(x, mean=bin_center, sigma=0.1)
            bin_avg[index] = np.average(y, weights=weights)

    return (bin_centers, bin_avg)
Needless to say, this is not working! I am getting the plot with the first value of sigma. Please help...
The following snippet should do more or less what you tried to do. You mainly have a logical problem in your code: x.any() < 100 will always be True, so you'll never execute the second part.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 1000)
y = np.sin(x**2)

def gaussian(x, amp=1, mean=0, sigma=1):
    return amp*np.exp(-(x - mean)**2/(2*sigma**2))

def weighted_average(x, y, step_size=0.3):
    weights = np.zeros_like(x)
    bin_centers = np.arange(np.min(x), np.max(x) - .5*step_size, step_size) + .5*step_size
    bin_avg = np.zeros_like(bin_centers)
    for i, center in enumerate(bin_centers):
        # Select the indices that should count towards that bin
        idx = ((x >= center - .5*step_size) & (x <= center + .5*step_size))
        weights = gaussian(x[idx], mean=center, sigma=step_size)
        bin_avg[i] = np.average(y[idx], weights=weights)
    return (bin_centers, bin_avg)

idx = x <= 4
plt.plot(*weighted_average(x[idx], y[idx], step_size=0.6))
idx = x >= 3
plt.plot(*weighted_average(x[idx], y[idx], step_size=0.1))
plt.plot(x, y)
plt.legend(['0.6', '0.1', 'y'])
plt.show()
However, depending on the usage, you could also implement a moving average directly:
x = np.linspace(0, 60, 1000)
y = np.sin(x**2)
z = np.zeros_like(x)
z[0] = x[0]

for i, t in enumerate(x[1:]):
    a = .2
    z[i+1] = a*y[i+1] + (1 - a)*z[i]

plt.plot(x, y)
plt.plot(x, z)
plt.legend(['data', 'moving average'])
plt.show()
Of course, you could then change a adaptively, e.g. depending on the local variance. Also note that this a priori has a small bias, depending on a and the step size in x.
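Not from the original answer, but a minimal sketch of what changing a adaptively could look like; here a is simply switched by x-region rather than driven by the local variance, and the cut-off at x = 20 and the factors 0.5/0.1 are arbitrary illustrative choices:
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 60, 1000)
y = np.sin(x**2)

z = np.zeros_like(x)
z[0] = y[0]
for i in range(1, len(x)):
    # stronger smoothing (smaller a) in the rapidly oscillating region
    a = 0.5 if x[i] < 20 else 0.1
    z[i] = a * y[i] + (1 - a) * z[i-1]

plt.plot(x, y)
plt.plot(x, z)
plt.legend(['data', 'adaptive moving average'])
plt.show()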
My aim is to make image1 move along the ring from its current position up to 180 degrees. I have been trying different things, but nothing seems to work. My final aim is to move both images along the ring in different directions and finally merge them and make them disappear. I keep getting the error above. Can you please help? Also, can you tell me how I can go about this problem?
from visual import *
import numpy as np

x = 3
y = 0
z = 0
i = pi/3
c = 0.120239  # A.U/minute
r = 1

for theta in arange(0, 2*pi, 0.1):  # range of theta values; 0 to 2*pi
    xunit = r * sin(theta)*cos(i) + x
    yunit = r * sin(theta)*sin(i) + y
    zunit = r*cos(theta) + z

ring = curve(color=color.white)  # creates a curve
for theta in arange(0, 2*pi, 0.01):
    ring.append(pos=(sin(theta)*cos(i) + x, sin(theta)*sin(i) + y, cos(theta) + z))

image1 = sphere(pos=(2.5, -0.866, 0), radius=0.02, color=color.yellow)
image2 = sphere(pos=(2.5, -0.866, 0), radius=0.02, color=color.yellow)
earth = sphere(pos=(-3, 0, -0.4), color=color.yellow, radius=0.3, material=materials.earth)  # creates the observer

d_c_p = pow((x-xunit)**2 + (y-yunit)**2 + (z-zunit)**2, 0.5)  # calculates the distance between the centre and points on the ring
d_n_p = abs(yunit + 0.4998112152755791)  # calculates the distance to the nearest point
t1 = (d_c_p + d_n_p)/c
t0 = d_c_p/c
t = t1 - t0  # calculates the time it takes from one point to another

theta = []
t = []
dtheta = np.diff(theta)  # calculates the difference in theta
dt = np.diff(t)  # calculates the difference in t
speed = r*dtheta/dt  # hence this calculates the speed

deltat = 0.005
t2 = 0
while True:
    rate(5)
    image2.pos = image2.pos + speed*deltat  # increments the position of image2
    t2 = t2 + deltat
Your problem is that image2.pos is a vector (that's the "3" in the error message) but speed*deltat is a scalar (that's the "0" in the error message). You can't add a vector and a scalar. Instead of a scalar "speed" you need a vector velocity. There seem to be some errors in indentation in the program you posted, so there is some possibility I've misinterpreted what you're trying to do.
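Not part of the original answer, but a minimal sketch of one way to express the motion, assuming the classic VPython visual module used above: advance the ring angle each frame and recompute the sphere position as a vector instead of adding a scalar to pos (the angular step and frame rate are arbitrary illustrative values):
from visual import *

x, y, z = 3, 0, 0   # ring centre, as in the question
i = pi/3            # ring inclination
r = 1.0

theta = 0.0
dtheta = 0.01       # angular step per frame

image1 = sphere(pos=(x + r*sin(theta)*cos(i), y + r*sin(theta)*sin(i), z + r*cos(theta)),
                radius=0.02, color=color.yellow)

while theta < pi:   # stop after moving 180 degrees along the ring
    rate(100)
    theta += dtheta
    # recompute the position on the ring and assign it as a vector
    image1.pos = vector(x + r*sin(theta)*cos(i),
                        y + r*sin(theta)*sin(i),
                        z + r*cos(theta))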
For VPython questions it's better to post to the VPython forum, where there are many more VPython users who will see your question than if you post to stackoverflow:
https://groups.google.com/forum/?fromgroups&hl=en#!forum/vpython-users