I use Floyd-Steinberg dithering to diffuse the quantization error after reducing an image's colours with KMeans from scikit-learn. The given data is an RGB file, in both the grayscale and the colour case. The problem is the visualisation: I get no dithering.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
im = Image.open('file.png').convert('RGB')
pic = np.array(im, dtype=np.float64) / 255  # np.float is deprecated; use np.float64
im.close()
I would like to omit the KMeans part and focus on Floyd-Steinberg:
"""pic - as above, original array; image - processed image"""
def dither(pic, image):
v, c, s = pic.shape
Floyd = np.copy(image)
for i in range(1, v-1):
for j in range(1, c-1):
quan = pic[i][j] - image[i][j] #Quantization error
Floyd[i][j + 1] = quan * (np.float(7 / 16)) + Floyd[i][j + 1]
Floyd[i + 1][j - 1] = quan * (np.float(3 / 16)) + Floyd[i + 1][j - 1]
Floyd[i + 1][j] = quan * (np.float(5 / 16)) + Floyd[i + 1][j]
Floyd[i + 1][j + 1] = quan * (np.float(1 / 16)) + Floyd[i + 1][j + 1]
return Floyd
Floyd = dither(pic, image)  # image: the KMeans-reduced picture from the omitted part
plt.imshow(Floyd)
plt.show()
I do get a little dithering when I replace Floyd with pic, i.e. Floyd[i + 1][j] = quan * np.float64(5 / 16) + pic[i + 1][j]. However, that is improper code! Additionally, I then have to deal with colours outside the clusters, so I have to assign the new pixels to clusters once again. How can I make it work? Where is the crucial mistake?
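For reference, here is a minimal sketch of the usual loop structure, in which each error-adjusted pixel is quantized inside the loop and the error is diffused into the not-yet-visited neighbours of the working copy (palette is a hypothetical (k, 3) float array, e.g. the KMeans cluster centres):
def fs_dither_sketch(pic, palette):
    # pic: float RGB array in [0, 1]; palette: hypothetical (k, 3) array of colours
    out = pic.copy()
    v, c, _ = out.shape
    for i in range(v):
        for j in range(c):
            old = out[i, j].copy()
            # quantize the error-adjusted working pixel, not the original image
            new = palette[np.argmin(((palette - old) ** 2).sum(axis=1))]
            out[i, j] = new
            err = old - new
            if j + 1 < c:
                out[i, j + 1] += err * 7 / 16
            if i + 1 < v:
                if j > 0:
                    out[i + 1, j - 1] += err * 3 / 16
                out[i + 1, j] += err * 5 / 16
                if j + 1 < c:
                    out[i + 1, j + 1] += err * 1 / 16
    return np.clip(out, 0, 1)
Because the palette lookup sees the diffused error, the output only ever contains palette colours and the dithering pattern can emerge.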
from PIL import Image
import cv2
import numpy as np
##################################################### Solution 1 ##############################################################
#PIL.Image.convert parameters:
#https://pillow.readthedocs.io/en/4.2.x/reference/Image.html?highlight=image.convert#PIL.Image.Image.convert
#PIL.Image.convert Modes :
#https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes
#Convert the image to mode '1' (1-bit pixels, black and white, stored with one pixel per byte) with dithering
imageConvert = Image.open('Image.PNG').convert(mode='1',dither=Image.FLOYDSTEINBERG)
imageConvert.save('DitheringWithPIL.png')
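Note that Pillow applies Floyd-Steinberg dithering by default when converting to mode '1', so passing dither=Image.FLOYDSTEINBERG merely makes the choice explicit.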
##################################################### Solution 2 ##############################################################
SourceImage = cv2.imread('Image.PNG')  # renamed so it does not shadow PIL's Image
GrayImage = cv2.cvtColor(SourceImage, cv2.COLOR_BGR2GRAY)
cv2.imwrite('GrayImage.PNG', GrayImage)
Height = GrayImage.shape[0]
Width = GrayImage.shape[1]
for y in range(0, Height):
    for x in range(0, Width):
        old_value = GrayImage[y, x]
        new_value = 0
        if old_value > 128:
            new_value = 255
        GrayImage[y, x] = new_value
        Error = int(old_value) - new_value  # cast to int to avoid uint8 wrap-around
        if x < Width - 1:
            NewNumber = GrayImage[y, x + 1] + Error * 7 / 16
            if NewNumber > 255: NewNumber = 255
            elif NewNumber < 0: NewNumber = 0
            GrayImage[y, x + 1] = NewNumber
        if x > 0 and y < Height - 1:
            NewNumber = GrayImage[y + 1, x - 1] + Error * 3 / 16
            if NewNumber > 255: NewNumber = 255
            elif NewNumber < 0: NewNumber = 0
            GrayImage[y + 1, x - 1] = NewNumber
        if y < Height - 1:
            NewNumber = GrayImage[y + 1, x] + Error * 5 / 16
            if NewNumber > 255: NewNumber = 255
            elif NewNumber < 0: NewNumber = 0
            GrayImage[y + 1, x] = NewNumber
        if y < Height - 1 and x < Width - 1:
            NewNumber = GrayImage[y + 1, x + 1] + Error * 1 / 16
            if NewNumber > 255: NewNumber = 255
            elif NewNumber < 0: NewNumber = 0
            GrayImage[y + 1, x + 1] = NewNumber
cv2.imwrite('DitheringWithAlgorithm.PNG', GrayImage)
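A note on the loop above: the int() cast matters because old_value is a NumPy uint8 scalar, and subtracting a larger value from it can wrap around. Working in a float buffer and converting back to uint8 only at the end would avoid both the wrap-around risk and the per-neighbour clamping; the same loop can also be run on each channel of a colour image independently, which dithers onto the eight corner colours of the RGB cube.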
I want to rate the quality of the text in images by giving it a score (something like 'the image text is 90% bad; the text is not readable').
What I am doing now is using the Blind/Referenceless Image Spatial Quality Evaluator (BRISQUE) model to assess the quality.
It gives scores from 0 to 100: 0 for good quality and 100 for bad quality.
The problem I am having with this code is that it gives bad scores even to good-quality text images.
Also, the score sometimes exceeds 100, although according to the reference I am following it should stay between 0 and 100.
Can someone please suggest how I can get promising and reliable results for assessing the quality of text-based images?
import collections
from itertools import chain
# import urllib.request as request
import pickle
import numpy as np
import scipy.signal as signal
import scipy.special as special
import scipy.optimize as optimize
# import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import cv2
from libsvm import svmutil
from os import listdir
# Calculate the local mean
def normalize_kernel(kernel):
    return kernel / np.sum(kernel)

def gaussian_kernel2d(n, sigma):
    Y, X = np.indices((n, n)) - int(n / 2)
    gaussian_kernel = 1 / (2 * np.pi * sigma ** 2) * np.exp(-(X ** 2 + Y ** 2) / (2 * sigma ** 2))
    return normalize_kernel(gaussian_kernel)

def local_mean(image, kernel):
    return signal.convolve2d(image, kernel, 'same')

# Calculate the local deviation
def local_deviation(image, local_mean, kernel):
    "Vectorized approximation of the local deviation"
    sigma = image ** 2
    sigma = signal.convolve2d(sigma, kernel, 'same')
    return np.sqrt(np.abs(local_mean ** 2 - sigma))

# Calculate the MSCN coefficients
def calculate_mscn_coefficients(image, kernel_size=6, sigma=7 / 6):
    C = 1 / 255
    kernel = gaussian_kernel2d(kernel_size, sigma=sigma)
    local_mean = signal.convolve2d(image, kernel, 'same')
    local_var = local_deviation(image, local_mean, kernel)
    return (image - local_mean) / (local_var + C)
# The MSCN coefficients are found to follow a Generalized Gaussian Distribution (GGD) for a broad spectrum of distorted images.
# Calculate the GGD
def generalized_gaussian_dist(x, alpha, sigma):
    beta = sigma * np.sqrt(special.gamma(1 / alpha) / special.gamma(3 / alpha))
    coefficient = alpha / (2 * beta * special.gamma(1 / alpha))  # beta is a value, not a callable
    return coefficient * np.exp(-(np.abs(x) / beta) ** alpha)
# Pairwise products of neighboring MSCN coefficients
def calculate_pair_product_coefficients(mscn_coefficients):
    return collections.OrderedDict({
        'mscn': mscn_coefficients,
        'horizontal': mscn_coefficients[:, :-1] * mscn_coefficients[:, 1:],
        'vertical': mscn_coefficients[:-1, :] * mscn_coefficients[1:, :],
        'main_diagonal': mscn_coefficients[:-1, :-1] * mscn_coefficients[1:, 1:],
        'secondary_diagonal': mscn_coefficients[1:, :-1] * mscn_coefficients[:-1, 1:]
    })

# Asymmetric Generalized Gaussian Distribution (AGGD) model
def asymmetric_generalized_gaussian(x, nu, sigma_l, sigma_r):
    def beta(sigma):
        return sigma * np.sqrt(special.gamma(1 / nu) / special.gamma(3 / nu))

    coefficient = nu / ((beta(sigma_l) + beta(sigma_r)) * special.gamma(1 / nu))
    f = lambda x, sigma: coefficient * np.exp(-(x / beta(sigma)) ** nu)
    return np.where(x < 0, f(-x, sigma_l), f(x, sigma_r))
# Fitting the Asymmetric Generalized Gaussian Distribution
def asymmetric_generalized_gaussian_fit(x):
    def estimate_phi(alpha):
        numerator = special.gamma(2 / alpha) ** 2
        denominator = special.gamma(1 / alpha) * special.gamma(3 / alpha)
        return numerator / denominator

    def estimate_r_hat(x):
        size = np.prod(x.shape)
        return (np.sum(np.abs(x)) / size) ** 2 / (np.sum(x ** 2) / size)

    def estimate_R_hat(r_hat, gamma):
        numerator = (gamma ** 3 + 1) * (gamma + 1)
        denominator = (gamma ** 2 + 1) ** 2
        return r_hat * numerator / denominator
    def mean_squares_sum(x, filter=lambda z: z == z):
        filtered_values = x[filter(x)]
        squares_sum = np.sum(filtered_values ** 2)
        return squares_sum / filtered_values.shape[0]  # divide by the element count, not the shape tuple
    def estimate_gamma(x):
        left_squares = mean_squares_sum(x, lambda z: z < 0)
        right_squares = mean_squares_sum(x, lambda z: z >= 0)
        return np.sqrt(left_squares) / np.sqrt(right_squares)

    def estimate_alpha(x):
        r_hat = estimate_r_hat(x)
        gamma = estimate_gamma(x)
        R_hat = estimate_R_hat(r_hat, gamma)
        solution = optimize.root(lambda z: estimate_phi(z) - R_hat, [0.2]).x
        return solution[0]

    def estimate_sigma(x, alpha, filter=lambda z: z < 0):
        return np.sqrt(mean_squares_sum(x, filter))

    def estimate_mean(alpha, sigma_l, sigma_r):
        return (sigma_r - sigma_l) * constant * (special.gamma(2 / alpha) / special.gamma(1 / alpha))

    alpha = estimate_alpha(x)
    sigma_l = estimate_sigma(x, alpha, lambda z: z < 0)
    sigma_r = estimate_sigma(x, alpha, lambda z: z >= 0)
    constant = np.sqrt(special.gamma(1 / alpha) / special.gamma(3 / alpha))
    mean = estimate_mean(alpha, sigma_l, sigma_r)
    return alpha, mean, sigma_l, sigma_r
# Calculate the BRISQUE features
def calculate_brisque_features(image, kernel_size=7, sigma=7 / 6):
    def calculate_features(coefficients_name, coefficients, accum=np.array([])):
        alpha, mean, sigma_l, sigma_r = asymmetric_generalized_gaussian_fit(coefficients)
        if coefficients_name == 'mscn':
            var = (sigma_l ** 2 + sigma_r ** 2) / 2
            return [alpha, var]
        return [alpha, mean, sigma_l ** 2, sigma_r ** 2]

    mscn_coefficients = calculate_mscn_coefficients(image, kernel_size, sigma)
    coefficients = calculate_pair_product_coefficients(mscn_coefficients)
    features = [calculate_features(name, coeff) for name, coeff in coefficients.items()]
    flatten_features = list(chain.from_iterable(features))
    return np.array(flatten_features, dtype=object)

# Load an image from the local machine
def load_image(file):
    return cv2.imread(file)
    # return skimage.io.imread("img.png", plugin='pil')
path = "C:\\Users\\Krishna\\PycharmProjects\\ImageScore\\images2\\"
image_list = listdir(path)
for file in image_list:
    image = load_image(path + file)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # image = load_image()
    # gray_image = skimage.color.rgb2gray(image)
    # _ = skimage.io.imshow(image)

    # Calculate the coefficients
    mscn_coefficients = calculate_mscn_coefficients(gray_image, 7, 7 / 6)
    coefficients = calculate_pair_product_coefficients(mscn_coefficients)

    # Fit the coefficients to Generalized Gaussian Distributions
    brisque_features = calculate_brisque_features(gray_image, kernel_size=7, sigma=7 / 6)

    # Resize the image and calculate the BRISQUE features again
    downscaled_image = cv2.resize(gray_image, None, fx=1 / 2, fy=1 / 2, interpolation=cv2.INTER_CUBIC)
    downscale_brisque_features = calculate_brisque_features(downscaled_image, kernel_size=7, sigma=7 / 6)
    brisque_features = np.concatenate((brisque_features, downscale_brisque_features))
# Use a pretrained SVR model to compute the quality score. To get good results, the features must be scaled to [-1, 1].
def scale_features(features):
    with open('normalize.pickle', 'rb') as handle:
        scale_params = pickle.load(handle)
    min_ = np.array(scale_params['min_'])
    max_ = np.array(scale_params['max_'])
    return -1 + (2.0 / (max_ - min_) * (features - min_))
def calculate_image_quality_score(brisque_features):
    model = svmutil.svm_load_model('brisque_svm.txt')
    scaled_brisque_features = scale_features(brisque_features)
    x, idx = svmutil.gen_svm_nodearray(
        scaled_brisque_features,
        isKernel=(model.param.kernel_type == svmutil.PRECOMPUTED))
    nr_classifier = 1
    prob_estimates = (svmutil.c_double * nr_classifier)()
    return svmutil.libsvm.svm_predict_probability(model, x, prob_estimates)

print(calculate_image_quality_score(brisque_features))
Here is one output of the quality score I get for one of the text-based images:
156.04440687506016
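One likely reason the score leaves the nominal range: the LIBSVM model is a support vector regressor, and a regressor's output is not bounded, so feature vectors unlike its training data (which text-heavy images may well be, since BRISQUE was trained on natural scenes) can extrapolate past 100. A pragmatic workaround, assuming the rest of the pipeline stays as posted, is to clip the prediction:
score = calculate_image_quality_score(brisque_features)
print(np.clip(score, 0, 100))  # clamp to the nominal BRISQUE range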
I am stuck tracing a PyTorch model: this specific module fails with an error:
RuntimeError: 0INTERNAL ASSERT FAILED at "../torch/csrc/jit/ir/alias_analysis.cpp":611, please report a bug to PyTorch. We don't have an op for aten::fill_ but it isn't a special case. Argument types: Tensor, bool,
Candidates:
aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> (Tensor(a!))
aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> (Tensor(a!))
Here is the reduced code example to reproduce the bug:
import torch
import torch.nn.functional as F
import torch.nn as nn

class SurroundPattern(nn.Module):
    def __init__(self, crop_size=1. / 2):
        super(SurroundPattern, self).__init__()
        self.crop_size = crop_size

    def forward(self, x, s):
        H, W = x.shape[2:]
        crop_h = (int(H / 2 - self.crop_size / 2 * H), int(H / 2 + self.crop_size / 2 * H))
        crop_w = (int(W / 2 - self.crop_size / 2 * W), int(W / 2 + self.crop_size / 2 * W))
        x_mask = torch.zeros(H, W, device=x.device, dtype=torch.bool)
        x_mask[crop_h[0]:crop_h[1], crop_w[0]:crop_w[1]] = True
        inside_indices = torch.where(x_mask)
        inside_part = x[:, :, inside_indices[0], inside_indices[1]]
        inside_feat = inside_part.mean(2)
        outside_indices = torch.where(~x_mask)
        outside_part = x[:, :, outside_indices[0], outside_indices[1]]
        outside_feat = outside_part.mean(2)
        fused = torch.stack([inside_feat, outside_feat], dim=2).unsqueeze(3)
        if s is None:
            return fused

        SH, SW = s.shape[2:]
        crop_sh = (int(SH / 2 - self.crop_size / 2 * SH), int(SH / 2 + self.crop_size / 2 * SH))
        crop_sw = (int(SW / 2 - self.crop_size / 2 * SW), int(SW / 2 + self.crop_size / 2 * SW))
        s_mask = torch.zeros(SH, SW, device=s.device, dtype=torch.bool)
        s_mask[crop_sh[0]:crop_sh[1], crop_sw[0]:crop_sw[1]] = True
        s_inside_indices = torch.where(s_mask)
        inside_sal = s[:, :, s_inside_indices[0], s_inside_indices[1]].flatten(1)
        s_outside_indices = torch.where(~s_mask)
        outside_sal = s[:, :, s_outside_indices[0], s_outside_indices[1]].flatten(1)
        if outside_sal.shape != inside_sal.shape:
            outside_sal = F.adaptive_max_pool1d(outside_sal.unsqueeze(1), output_size=784)
            outside_sal = outside_sal.squeeze(1)
        fused_sal = torch.stack([inside_sal, outside_sal], dim=2).unsqueeze(3)
        return fused, fused_sal

x = torch.randn(2, 512, 7, 7)
s = torch.randn(2, 1, 56, 56)
patt = SurroundPattern()
traced_cell = torch.jit.trace(patt, (x, s))
print(traced_cell)
How can I figure out where exactly the problem is? Is there a way to fix it with other functions?
Thank you!
The problem is that you try to fill in a bool tensor, which is apparently not yet supported in JIT tracing (or is a bug).
Replacing this:
x_mask = torch.zeros(H, W, device=x.device, dtype=torch.bool)
x_mask[crop_h[0]:crop_h[1], crop_w[0]:crop_w[1]] = True
with:
x_mask = torch.zeros(H, W, device=x.device)
x_mask[crop_h[0]:crop_h[1], crop_w[0]:crop_w[1]] = 1
should resolve the error. This is of course suboptimal compared to the intended tensor dtype, but any operation you would perform on a torch.BoolTensor can still be performed on the float mask (or after casting it back).
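If downstream code really needs boolean semantics, one option (a sketch under the same setup as the module above, with H, W, crop_h and crop_w as defined there) is to keep the float mask for the fill and cast only where a bool tensor is consumed:
x_mask = torch.zeros(H, W, device=x.device)
x_mask[crop_h[0]:crop_h[1], crop_w[0]:crop_w[1]] = 1
inside_indices = torch.where(x_mask.bool())    # cast back for indexing
outside_indices = torch.where(~x_mask.bool())  # logical negation still works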
How can I fill between two lines with different x and y arrays? At the moment the filling is done for two y functions over a common x-axis, which is not correct here. When I tried passing x1, x2, y1, y2 I got a worse result than the one displayed below.
import matplotlib.pyplot as plt
import numpy as np
from numpy import exp, sin

def g(y):
    amp = 0.6
    return amp * exp(-2.5 * y) * sin(9.8 * y)

def g_e(y):
    amp = 0.66
    return amp * exp(-2.5 * y) * sin(8.1 * y)

y = np.linspace(0, 0.83, 501)
y_e = np.linspace(0, 1.08, 501)
values = g(y)
values_e = g_e(y_e)
theta = np.radians(-65.9)
c, s = np.cos(theta), np.sin(theta)
rot_matrix = np.array(((c, s), (-s, c)))
xy = np.array([y, values]).T @ rot_matrix
theta_e = np.radians(-60)
c_e, s_e = np.cos(theta_e), np.sin(theta_e)
rot_matrix_e = np.array(((c_e, s_e), (-s_e, c_e)))
xy_e = np.array([y, values_e]).T @ rot_matrix_e
fig, ax = plt.subplots(figsize=(5, 5))
ax.axis('equal')
x_shift = 0.59
y_shift = 0.813
x_shift_e = 0.54
y_shift_e = 0.83
ax.plot(xy[:, 0] + x_shift, xy[:, 1] + y_shift, c='red')
ax.plot(xy_e[:, 0] + x_shift_e, xy_e[:, 1] + y_shift_e, c='black')
ax.fill_between(xy[:, 0] + x_shift, xy[:, 1] + y_shift, xy_e[:, 1] + y_shift_e)
plt.show()
Script for the additional question:
for i in range(len(x) - 1):
    for j in range(i - 1):
        xs_ys = intersection(x[i], x[i + 1], x[j], x[j + 1], y[i], y[i + 1], y[j], y[j + 1])
        if xs_ys in not None:
            xs.append(xs_ys[0])
            ys.append(xs_ys[1])
I got an error:
if xs_ys in not None:
^
SyntaxError: invalid syntax
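(As an aside, the immediate fix for that SyntaxError is to write the identity test as is not: if xs_ys is not None:.)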
Here is an approach creating a "polygon" by concatenating the reverse of one curve to the other curve. ax.fill() can be used to fill the polygon. Note that fill_between() can look strange when the x-values aren't nicely ordered (as is the case here after the rotation). Also, the mirror function fill_betweenx() wouldn't be adequate in this case.
import matplotlib.pyplot as plt
import numpy as np

def g(y):
    amp = 0.6
    return amp * np.exp(-2.5 * y) * np.sin(9.8 * y)

def g_e(y):
    amp = 0.66
    return amp * np.exp(-2.5 * y) * np.sin(8.1 * y)

y = np.linspace(0, 0.83, 501)
y_e = np.linspace(0, 1.08, 501)
values = g(y)
values_e = g_e(y_e)
theta = np.radians(-65.9)
c, s = np.cos(theta), np.sin(theta)
rot_matrix = np.array(((c, s), (-s, c)))
xy = np.array([y, values]).T @ rot_matrix
theta_e = np.radians(-60)
c_e, s_e = np.cos(theta_e), np.sin(theta_e)
rot_matrix_e = np.array(((c_e, s_e), (-s_e, c_e)))
xy_e = np.array([y, values_e]).T @ rot_matrix_e
fig, ax = plt.subplots(figsize=(5, 5))
ax.axis('equal')
x_shift = 0.59
y_shift = 0.813
x_shift_e = 0.54
y_shift_e = 0.83
xf = np.concatenate([xy[:, 0] + x_shift, xy_e[::-1, 0] + x_shift_e])
yf = np.concatenate([xy[:, 1] + y_shift, xy_e[::-1, 1] + y_shift_e])
ax.plot(xy[:, 0] + x_shift, xy[:, 1] + y_shift, c='red')
ax.plot(xy_e[:, 0] + x_shift_e, xy_e[:, 1] + y_shift_e, c='black')
ax.fill(xf, yf, color='dodgerblue', alpha=0.3)
plt.show()
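Note that ax.fill() closes the polygon automatically by connecting the last point back to the first, so no explicit closing point is needed, and the alpha value keeps both curves visible through the fill.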
For reasons, I need to implement the Runge-Kutta 4 method in PyTorch (so no, I'm not going to use scipy's odeint). I tried, and I get weird results on the simplest test case: solving x' = x with x(0) = 1 (analytical solution: x = exp(t)). Basically, as I reduce the time step, I cannot get the numerical error to go down. I'm able to do it with the simpler Euler method, but not with the Runge-Kutta 4 method, which makes me suspect some floating point issue (maybe I'm missing some hidden conversion from double precision to single?).
import torch
import numpy as np
import matplotlib.pyplot as plt
def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i + 1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC])
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i + 1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1 / 6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.])
    RungeKutta4_error = np.array([0., 0., 0.])
    indices = np.arange(1, Euler_error.shape[0] + 1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()

    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
The result:
As you can see, the Euler method converges (slowly, as expected of a first order method). However, the Runge-Kutta4 method does not converge as the time step gets smaller and smaller. The error goes down initially, and then up again. What's the issue here?
The reason is indeed a floating point precision issue. torch defaults to single precision, so once the truncation error becomes small enough, the total error is basically determined by the roundoff error, and reducing the truncation error further (by increasing the number of steps, i.e. decreasing the time step) doesn't lead to any decrease in the total error.
To fix this, we need to enforce 64-bit double precision for all floating point torch tensors and numpy arrays by passing torch.float64 and np.float64 explicitly (torch.double and np.double are aliases for these fixed-size 64-bit types; spelling out float64 makes the intended width unambiguous). Here's the fixed code:
import torch
import numpy as np
import matplotlib.pyplot as plt
def Euler(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i + 1]
        y_i = values[i]
        dt = t_next - t_i
        dy = f(t_i, y_i) * dt
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

def RungeKutta4(f, IC, time_grid):
    y0 = torch.tensor([IC], dtype=torch.float64)
    time_grid = time_grid.to(y0[0])
    values = y0
    for i in range(0, time_grid.shape[0] - 1):
        t_i = time_grid[i]
        t_next = time_grid[i + 1]
        y_i = values[i]
        dt = t_next - t_i
        dtd2 = 0.5 * dt
        f1 = f(t_i, y_i)
        f2 = f(t_i + dtd2, y_i + dtd2 * f1)
        f3 = f(t_i + dtd2, y_i + dtd2 * f2)
        f4 = f(t_next, y_i + dt * f3)
        dy = 1 / 6 * dt * (f1 + 2 * (f2 + f3) + f4)
        y_next = y_i + dy
        y_next = y_next.unsqueeze(0)
        values = torch.cat((values, y_next), dim=0)
    return values

# differential equation
def f(T, X):
    return X

# initial condition
IC = 1.

# integration interval
def integration_interval(steps, ND=1):
    return torch.linspace(0, ND, steps, dtype=torch.float64)

# analytical solution
def analytical_solution(t_range):
    return np.exp(t_range, dtype=np.float64)

# test a numerical method
def test_method(method, t_range, analytical_solution):
    numerical_solution = method(f, IC, t_range)
    L_inf_err = torch.dist(numerical_solution, analytical_solution, float('inf'))
    return L_inf_err

if __name__ == '__main__':
    Euler_error = np.array([0., 0., 0.], dtype=np.float64)
    RungeKutta4_error = np.array([0., 0., 0.], dtype=np.float64)
    indices = np.arange(1, Euler_error.shape[0] + 1)
    n_steps = np.power(10, indices)
    for i, n in np.ndenumerate(n_steps):
        t_range = integration_interval(steps=n)
        solution = analytical_solution(t_range)
        Euler_error[i] = test_method(Euler, t_range, solution).numpy()
        RungeKutta4_error[i] = test_method(RungeKutta4, t_range, solution).numpy()

    plots_path = "./plots"
    a = plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    plt.plot(n_steps, Euler_error, label="Euler error", linestyle='-')
    plt.plot(n_steps, RungeKutta4_error, label="RungeKutta 4 error", linestyle='-.')
    plt.legend()
    plt.savefig(plots_path + "/errors.png")
Result:
Now, as we decrease the time step, the error of the RungeKutta4 approximation decreases with the correct rate.
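As an aside (an alternative not used in the answer above), the default can also be changed once globally instead of per call:
import torch
torch.set_default_dtype(torch.float64)  # newly created floating point tensors now default to 64-bit
With 64-bit floats, the RK4 truncation error keeps falling at its expected fourth-order rate until it meets the much lower float64 roundoff floor.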
I am trying to implement Floyd-Steinberg dithering in Python after using KMeans. I realised that after dithering I receive colours which are not included in the reduced palette, so I modify the image again with KMeans. However, when trying it with this picture, I see no dithering at all. I am stuck and tired; please help me, my ideas are almost extinct.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
k = 16
im = Image.open('Image.png').convert('RGB') #Image converted to RGB
pic = np.array(im, dtype=np.float64) / 255 #Enables imshow(); np.float is deprecated
im.close()
def kmeans(pic): #Prepares algorithmic data
    v, c, s = pic.shape
    repic = np.resize(pic, (c * v, 3))
    kme = KMeans(n_clusters=k).fit(repic)
    cl = kme.cluster_centers_
    return kme, cl, repic, v, c

kme, cl, repic, v, c = kmeans(pic)
pred = kme.predict(repic)

def picture(v, c, cl, pred): #Creates a picture with reduced colors
    image = np.ones((v, c, 3))
    ind = 0
    for i in range(v):
        for j in range(c):
            image[i][j] = cl[pred[ind]]
            ind += 1
    return image
image = picture(v, c, cl, pred)
def dither(pic, image): #Floyd-Steinberg dithering
    v, c, s = pic.shape
    Floyd = np.copy(image)
    for i in range(1, v - 1):
        for j in range(1, c - 1):
            quan = pic[i][j] - image[i][j]
            Floyd[i][j + 1] = quan * np.float64(7 / 16) + pic[i][j + 1]
            Floyd[i + 1][j - 1] = quan * np.float64(5 / 16) + pic[i + 1][j - 1]
            Floyd[i + 1][j] = quan * np.float64(3 / 16) + pic[i + 1][j]
            Floyd[i + 1][j + 1] = quan * np.float64(1 / 16) + pic[i + 1][j + 1]
    return Floyd
fld = dither(pic, image)
a1, a2, reim, a3, a4 = kmeans(fld)
lab = kme.predict(reim)
Floyd = picture(v, c, cl, lab)
plt.imshow(Floyd)
plt.show()
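For completeness, here is a sketch of the interleaved variant discussed under the first question, reusing kme and cl from the code above: each pixel is assigned to its cluster inside the loop, after the diffused error has been added, so no out-of-palette colours can appear and the second KMeans pass becomes unnecessary (a sketch, not the original poster's code; per-pixel predict() is slow but illustrates the idea):
def dither_interleaved(pic, kme, cl):
    Floyd = np.copy(pic)
    v, c, s = pic.shape
    for i in range(v):
        for j in range(c):
            old = Floyd[i][j].copy()
            # quantize the error-adjusted pixel on the fly
            new = cl[kme.predict(old.reshape(1, -1))[0]]
            Floyd[i][j] = new
            quan = old - new
            if j + 1 < c:
                Floyd[i][j + 1] += quan * (7 / 16)
            if i + 1 < v:
                if j > 0:
                    Floyd[i + 1][j - 1] += quan * (3 / 16)
                Floyd[i + 1][j] += quan * (5 / 16)
                if j + 1 < c:
                    Floyd[i + 1][j + 1] += quan * (1 / 16)
    return Floyd

Floyd = dither_interleaved(pic, kme, cl)
plt.imshow(np.clip(Floyd, 0, 1))
plt.show()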