I'm using actix-web to build an endpoint where the user can upload some data, which is then drawn onto a picture to generate a simple poster.
Here is some of my code:
#[derive(Debug, Deserialize)]
pub struct UserForm {
    name: String,
    age: u8,
}
#[post("/generate_user_poster")]
pub async fn generate_user_poster(form: web::Form<UserForm>) -> HttpResult {
    let form = form.into_inner();
    // This is pseudo code
    let img = image::load("bg.jpg");
    img.drawText(100, 100, form.name);
    img.drawText(100, 200, form.age);
    Ok(HttpResponse::Ok().body(img))
}
How can I draw text onto bg.jpg?
The simplest way to do this is probably with the imageproc crate. You can use draw_text_mut to draw the text onto the image in place, like so:
#[post("/generate_user_poster")]
pub async fn generate_user_poster(form: web::Form<UserForm>) -> HttpResult {
    let form = form.into_inner();
    let mut img = image::open("bg.jpg").unwrap();

    // Load a font.
    let font = Vec::from(include_bytes!("path/to/font.ttf") as &[u8]);
    let font = Font::try_from_vec(font).unwrap();

    let font_size = 40.0;
    let scale = Scale {
        x: font_size,
        y: font_size,
    };

    // Draw text onto the image, mutating it in place:
    draw_text_mut(
        &mut img,
        Rgba([0u8, 0u8, 0u8, 255u8]),
        100, // your x position in pixels
        100, // your y position in pixels
        scale,
        &font,
        &form.name, // must be of type &str
    );
    draw_text_mut(
        &mut img,
        Rgba([0u8, 0u8, 0u8, 255u8]),
        100,
        200,
        scale,
        &font,
        &form.age.to_string(), // age is a u8, so convert it to a string first
    );

    // Note: `img` still needs to be encoded to bytes (e.g. JPEG) before it can
    // be used as a response body; see the sketch below.
    Ok(HttpResponse::Ok().body(img))
}
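Since HttpResponse::body expects bytes rather than a DynamicImage, the image still has to be encoded before it is returned. Here is a minimal sketch of that last step, assuming image 0.23 and a JPEG response; poster_response is a hypothetical helper, not part of either crate:
use actix_web::HttpResponse;
use image::{DynamicImage, ImageOutputFormat};

// Hypothetical helper: encode a finished poster as JPEG bytes and wrap it
// in an actix-web response.
fn poster_response(img: &DynamicImage) -> HttpResponse {
    let mut buf: Vec<u8> = Vec::new();
    // In image 0.23, `write_to` accepts any `std::io::Write`, so a Vec works here.
    img.write_to(&mut buf, ImageOutputFormat::Jpeg(90))
        .expect("failed to encode poster");
    HttpResponse::Ok()
        .content_type("image/jpeg")
        .body(buf)
}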
This is a more complete example:
[dependencies]
image = "0.23.14"
imageproc = "0.22.0"
rusttype = "0.9.2"
use image::{imageops::overlay, GenericImageView, Rgba};
use imageproc::drawing::draw_text_mut;
use rusttype::{Font, Scale};
fn main() {
    let mut img = image::open("bg.jpg").unwrap();

    // Load a font.
    let font = Vec::from(include_bytes!("../arial.ttf") as &[u8]);
    let font = Font::try_from_vec(font).unwrap();

    let font_size = 40.0;
    let scale = Scale {
        x: font_size,
        y: font_size,
    };

    // Draw text.
    let name = "hello world";
    draw_text_mut(
        &mut img,
        Rgba([255u8, 0u8, 0u8, 255u8]),
        100, // x
        100, // y
        scale,
        &font,
        name, // must be of type &str
    );

    // Overlay another image on top of the background image.
    let tile = image::open("avatar.png").unwrap();
    let (w, h) = img.dimensions();
    let (w2, h2) = tile.dimensions();
    overlay(&mut img, &tile, w / 2 - w2 / 2, h / 2 - h2 / 2);

    // Write out a new image.
    img.save("out.png").unwrap();
}
|-- arial.ttf
|-- avatar.png
|-- bg.jpg
|-- Cargo.toml
|-- out.png
|-- src
| `-- main.rs
`-- target
I am trying to create gradient blobs. I supply the gen function with a Vec<ColorPoint>, and each point should go from maximum intensity at its center to no effect at its radius. The main problem is that I am not getting the colors I expect in the output image.
use image::{ImageBuffer, RgbImage, Rgb};
use std::ops::Add;
/// Color with red, green, blue and alpha channels between 0.0 and 1.0
#[derive(Debug)]
struct RgbaColor {
    r: f64,
    g: f64,
    b: f64,
    a: f64,
}

impl RgbaColor {
    fn to_rgb(&self) -> Rgb<u8> {
        let r = (self.r * 255.0) as u8;
        let g = (self.g * 255.0) as u8;
        let b = (self.b * 255.0) as u8;
        Rgb::from([r, g, b])
    }
}
impl Add for RgbaColor {
    type Output = Self;

    fn add(self, fg: Self) -> Self {
        // self is the background and fg is the foreground
        let new_alpha = fg.a + self.a * (1.0 - fg.a);
        Self {
            r: (fg.r * fg.a + self.r * self.a * (1.0 - fg.a)) / new_alpha,
            g: (fg.g * fg.a + self.g * self.a * (1.0 - fg.a)) / new_alpha,
            b: (fg.b * fg.a + self.b * self.a * (1.0 - fg.a)) / new_alpha,
            a: new_alpha,
        }
    }
}
#[derive(Debug)]
struct ColorPoint {
    color: RgbaColor,
    center: (u32, u32),
    radius: u32,
}

impl ColorPoint {
    fn rgba_color_at_point(&self, x: u32, y: u32) -> RgbaColor {
        let x_dist = x as f64 - self.center.0 as f64;
        let y_dist = y as f64 - self.center.1 as f64;
        let dist = (x_dist.powf(2.0) + y_dist.powf(2.0)).sqrt();
        RgbaColor {
            r: self.color.r,
            g: self.color.g,
            b: self.color.b,
            a: self.color.a * dist / self.radius as f64,
        }
    }
}
fn main() {
    let color_points = vec![
        ColorPoint { color: RgbaColor { r: 1.0, g: 1.0, b: 0.0, a: 1.0 }, center: (0, 50), radius: 20 },
        ColorPoint { color: RgbaColor { r: 1.0, g: 0.0, b: 0.0, a: 1.0 }, center: (10, 0), radius: 30 },
    ];
    gen(color_points)
}

fn gen(color_points: Vec<ColorPoint>) {
    let geometry = (50, 100);
    let mut background: RgbImage = ImageBuffer::new(geometry.0 as u32, geometry.1 as u32);

    for (x, y, pixel) in background.enumerate_pixels_mut() {
        let mut curr_color = RgbaColor { r: 0.0, g: 0.0, b: 0.0, a: 1.0 }; // hardcoded background color
        for color_point in color_points.iter() {
            curr_color = curr_color + color_point.rgba_color_at_point(x, y);
        }
        *pixel = curr_color.to_rgb();
    }

    background.save("image.png").unwrap();
}
Output: (image omitted)
This code almost does what I expect, although the positions of the yellow and red blobs seem to be swapped, and when I change the hardcoded background color to white (RgbaColor { r: 1.0, g: 1.0, b: 1.0, a: 1.0 }) I get a magenta background instead.
I'm not sure whether my color model is wrong or if it's something else, because when I add individual RgbaColors together by hand I get the correct colors.
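As a side note on the stated intent (full intensity at the center, no effect at the radius): the alpha computed in rgba_color_at_point grows with distance instead of shrinking. A sketch of a falloff matching that intent, clamped so points outside the radius contribute nothing (falloff_alpha is just an illustrative free function, not a proposed fix for the whole blending question):
// Full intensity at the center, fading linearly to zero at `radius`,
// and no contribution beyond it.
fn falloff_alpha(base_alpha: f64, dist: f64, radius: f64) -> f64 {
    base_alpha * (1.0 - dist / radius).clamp(0.0, 1.0)
}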
I am trying to render text in a separate function using piston2d / piston_window. I am able to draw text just fine, but I can't figure out how to pass the appropriate parameters into a separate function.
I have studied "What is GlyphCache type in a function to render text in Piston2d" and adjusted my code accordingly, but I can't make sense of the error I am getting.
use piston_window::*;
fn main() {
    let font = include_bytes!("IBMPlexSans-Regular.ttf");

    let opengl = OpenGL::V3_2;
    let settings = WindowSettings::new("test", [500, 500])
        .graphics_api(opengl)
        .fullscreen(false)
        .vsync(true)
        .exit_on_esc(true);
    let mut window: PistonWindow = settings.build().unwrap();

    let mut glyphs = Glyphs::from_bytes(
        font,
        window.create_texture_context(),
        TextureSettings::new(),
    )
    .unwrap();

    while let Some(e) = window.next() {
        window.draw_2d(&e, |c, gfx, device| {
            clear([0.2; 4], gfx);

            text::Text::new_color([1.0, 1.0, 1.0, 0.7], 30)
                .draw(
                    "Hi!",
                    &mut glyphs,
                    &c.draw_state,
                    c.transform.trans(100., 100.),
                    gfx,
                )
                .unwrap();

            glyphs.factory.encoder.flush(device);
        });
    }
}

fn render_text(
    x: f64,
    y: f64,
    text: &str,
    size: u32,
    c: Context,
    g: &mut G2d,
    glyphs: &mut glyph_cache::rusttype::GlyphCache<GfxFactory, G2dTexture>,
) {
    text::Text::new(size)
        .draw(text, glyphs, &c.draw_state, c.transform.trans(x, y), g)
        .unwrap();
}
I am receiving the following error:
error[E0277]: the trait bound `Texture<gfx_device_gl::Resources>: UpdateTexture<gfx_device_gl::factory::Factory>` is not satisfied
--> src/main.rs:54:10
|
54 | .draw(text, glyphs, &c.draw_state, c.transform.trans(x, y), g)
| ^^^^ the trait `UpdateTexture<gfx_device_gl::factory::Factory>` is not implemented for `Texture<gfx_device_gl::Resources>`
|
= help: the following implementations were found:
<Texture<R> as UpdateTexture<TextureContext<F, R, C>>>
= note: required because of the requirements on the impl of `CharacterCache` for `GlyphCache<'_, gfx_device_gl::factory::Factory, Texture<gfx_device_gl::Resources>>`
I am aware this is probably very piston-specific, but I would be very happy about any pointers.
I just had the same problem. It's almost a year later, but the documentation for piston_window isn't the best, so maybe others will need this.
This worked for me:
use piston_window::types::Color;
use piston_window::{text, Context, G2d, Glyphs, PistonWindow, Transformed, WindowSettings};

pub const TEXT_COLOR: Color = [1.0, 1.0, 1.0, 1.0];

pub fn draw_text(
    ctx: &Context,
    graphics: &mut G2d,
    glyphs: &mut Glyphs,
    color: Color,
    pos: Pos,
    text: &str,
) {
    text::Text::new_color(color, 20)
        .draw(
            text,
            glyphs,
            &ctx.draw_state,
            ctx.transform.trans(pos.x, pos.y),
            graphics,
        )
        .unwrap();
}

pub struct Pos {
    pub x: f64,
    pub y: f64,
}

pub fn main() {
    let size = [500., 500.];
    let mut window: PistonWindow = WindowSettings::new("Test", size)
        .resizable(false)
        .exit_on_esc(true)
        .build()
        .unwrap();

    // The window has to exist before the font can be loaded through it.
    let assets = find_folder::Search::ParentsThenKids(3, 3)
        .for_folder("assets")
        .unwrap();
    let font = assets.join("retro-gaming.ttf");
    let mut glyphs = window.load_font(font).unwrap();

    while let Some(event) = window.next() {
        window.draw_2d(&event, |ctx, g, _| {
            draw_text(&ctx, g, &mut glyphs, TEXT_COLOR, Pos { x: 0.0, y: 10.0 }, "20");
        });
    }
}
Note that I'm using a different Glyphs type than you are. There's also one more dependency in Cargo.toml, namely find_folder = "0.3.0".
I'm not entirely sure whether the snippet above compiles and works; it is a quick refactor from this commit: https://github.com/laszukdawid/rsnake/commit/e6e23563ebdd9c7b972ee17b5d0299e2202358cf.
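For reference, the dependency section might look roughly like this; the piston_window version is an assumption, only find_folder = "0.3.0" is stated above:
[dependencies]
# piston_window version is a guess; use whatever release your project already targets
piston_window = "0.120.0"
find_folder = "0.3.0"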
I'm using the noise crate and having trouble understanding how to convert their Color type to an RGB value.
noise = "0.7.0"
pub type Color = [u8; 4];
I'm trying to use the get_value() function, seen here in the docs as:
pub fn get_value(&self, x: usize, y: usize) -> Color {
    let (width, height) = self.size;

    if x < width && y < height {
        self.map[x + y * width]
    } else {
        self.border_color
    }
}
get_value() is implemented for PlaneMapBuilder. So I would expect PlaneMapBuilder::get_value(x,y) to return something of the format [r,g,b,a], but this does not happen:
extern crate noise;
use noise::{utils::*, Billow};

fn main() {
    let my_noise = PlaneMapBuilder::new(&Billow::new()).build();
    let my_val = my_noise.get_value(1, 1);
    println!("{}", my_val.to_string());
    // prints something like -0.610765515150546, not a [u8; 4] as I would expect
}
In the docs I see this definition of add_gradient_point() which takes a Color as a parameter:
pub fn add_gradient_point(mut self, pos: f64, color: Color) -> Self {
    // check to see if the vector already contains the input point.
    if !self
        .gradient_points
        .iter()
        .any(|&x| (x.pos - pos).abs() < std::f64::EPSILON)
    {
        // it doesn't, so find the correct position to insert the new
        // control point.
        let insertion_point = self.find_insertion_point(pos);

        // add the new control point at the correct position.
        self.gradient_points
            .insert(insertion_point, GradientPoint { pos, color });
    }

    self
}
Here they use the [u8; 4] structure I would expect for the Color type:
let jade_gradient = ColorGradient::new()
    .clear_gradient()
    .add_gradient_point(-1.000, [24, 146, 102, 255])
    .add_gradient_point(0.000, [78, 154, 115, 255])
What could explain this behavior?
get_value() is implemented for PlaneMapBuilder
You are correct that PlaneMapBuilder "implements" get_value(). However, it is not the get_value() from NoiseImage; it actually comes from NoiseMap, whose get_value() returns an f64 and not a Color.
Depending on what kind of "colors" you want, you could instead use ImageRenderer and call its render() method with &my_noise, which returns a NoiseImage:
// noise = "0.7.0"
use noise::{utils::*, Billow};

fn main() {
    let my_noise = PlaneMapBuilder::new(&Billow::new()).build();
    let image = ImageRenderer::new().render(&my_noise);
    let my_val = image.get_value(1, 1);
    println!("{:?}", my_val);
    // Prints: `[18, 18, 18, 255]`
}
Here they use the [u8; 4] structure I would expect for the Color type
Just to be clear, those are the same thing in this case. In short, the type keyword lets you define a "type alias" for an existing type; essentially, you can give a complex type a shorthand name, but it is still the same type.
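A tiny illustration of that point (the names here are made up for the example):
// `Color` is only another name for `[u8; 4]`; the two are interchangeable.
type Color = [u8; 4];

fn brightness(c: Color) -> u32 {
    (c[0] as u32 + c[1] as u32 + c[2] as u32) / 3
}

fn main() {
    let as_alias: Color = [18, 18, 18, 255];
    let as_array: [u8; 4] = as_alias; // no conversion needed: same type
    println!("{}", brightness(as_array));
}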
I'm trying to write a raytracer in Rust. I'm having difficulty getting the for loops to run in parallel. I'm not sure where the problem is, but I can't seem to get anything on the screen. Is this the correct approach or am I completely heading in the wrong direction?
I've tried running the for loops without multi-threading and it does correctly produce output. I've also added loggers to the consumer loop and I'm getting the correct values as well. It just doesn't seem to update the window.
#[derive(Clone, Copy)]
pub struct Pixel {
    pub x: usize,
    pub y: usize,
    pub color: Vec3,
}

let mut buffer: Vec<u32> = vec![0; WIDTH * HEIGHT];
let (tx, rx) = mpsc::channel();

for x in 0..HEIGHT {
    let tx_t = tx.clone();
    thread::spawn(move || {
        for y in 0..WIDTH {
            let color = cast_ray(x, y); // returns a Vec3
            let pixel = Pixel { x, y, color };
            tx_t.send(pixel).unwrap();
        }
    });
}

for received in rx {
    buffer[received.x * WIDTH + received.y] =
        received.color.x << 16 | received.color.y << 8 | received.color.z;
}

while window.is_open() && !window.is_key_down(Key::Escape) {
    window.update_with_buffer(&buffer).unwrap();
}
I'm expecting a few spheres or color to appear on the screen, but it's just black.
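One property of std::sync::mpsc that matters for the snippet above: the `for received in rx` loop only ends once every Sender has been dropped, and the original tx (which the worker threads were cloned from) is never dropped, so the consumer loop blocks forever and update_with_buffer is never reached. A small self-contained sketch of that behaviour (not the raytracer itself):
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel();

    for i in 0..4 {
        let tx_t = tx.clone();
        thread::spawn(move || {
            tx_t.send(i).unwrap();
        });
    }

    // Without this, `rx` would still see one live Sender (the original `tx`)
    // and the loop below would block forever instead of finishing when the
    // worker threads are done.
    drop(tx);

    for value in rx {
        println!("got {}", value);
    }
    println!("all senders dropped, loop finished");
}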
I'm making a game using ggez, using a camera from ggez_goodies.
This camera only has rotation from the top left of an image, but I want it from the center. Is there a good way to position the image so that it rotates around the center instead?
I'd assume you'd just change the position to fix this; what I have currently just uses the position:
self.image
    .draw_camera(
        camera,
        ctx,
        graphics::Point2::new(self.position.0, self.position.1),
        self.rotation,
    )
    .unwrap();
I'm guessing a fix would look sort of like this:
self.image
    .draw_camera(
        camera,
        ctx,
        graphics::Point2::new(
            self.position.0 + self.rotation_offset.0,
            self.position.1 + self.rotation_offset.1,
        ),
        self.rotation,
    )
    .unwrap();
I think it would be possible with an offset, but I can't figure out how to set the offset based on the rotation angle.
What offset or change could I make to the position to get the image to rotate around its center instead of around the top left corner?
In the following example:
use ggez;
use ggez::event;
use ggez::graphics;
use ggez::graphics::{DrawMode, MeshBuilder};
use ggez::nalgebra as na;
use ggez::timer::check_update_time;
use ggez::{Context, GameResult};
use std::env;
use std::path;
struct MainState {
    rotate: f32,
    sprite_batch: graphics::spritebatch::SpriteBatch,
}

impl MainState {
    fn new(ctx: &mut Context) -> GameResult<MainState> {
        let image = graphics::Image::new(ctx, "/sprite_sheet.png").unwrap();
        let batch = graphics::spritebatch::SpriteBatch::new(image);
        Ok(MainState { rotate: 0.0, sprite_batch: batch })
    }
}

impl event::EventHandler for MainState {
    fn update(&mut self, ctx: &mut Context) -> GameResult {
        while check_update_time(ctx, 30) {
            self.rotate += 0.1;
        }
        Ok(())
    }

    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        graphics::clear(ctx, graphics::BLACK);

        let mesh = MeshBuilder::new().circle(
            DrawMode::fill(),
            na::Point2::new(400.0, 300.0),
            2.0,
            2.0,
            graphics::WHITE,
        ).build(ctx)?;

        self.sprite_batch.add(
            graphics::DrawParam::new()
                .src(graphics::Rect::new(0.0, 0.0, 1.0, 1.0))
                .rotation(self.rotate)
                .dest(na::Point2::new(400.0, 300.0))
                .offset(na::Point2::new(0.5, 0.5)),
        );

        graphics::draw(
            ctx,
            &self.sprite_batch,
            graphics::DrawParam::new().dest(na::Point2::new(0.0, 0.0)),
        )?;
        graphics::draw(
            ctx,
            &mesh,
            graphics::DrawParam::new().dest(na::Point2::new(0.0, 0.0)),
        )?;
        self.sprite_batch.clear();

        graphics::present(ctx)?;
        Ok(())
    }
}

pub fn main() -> GameResult {
    let resource_dir = if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
        let mut path = path::PathBuf::from(manifest_dir);
        path.push("resources");
        path
    } else {
        path::PathBuf::from("./resources")
    };

    let cb = ggez::ContextBuilder::new("oc", "bux")
        .add_resource_path(resource_dir)
        .window_mode(ggez::conf::WindowMode::default().dimensions(800.0, 600.0));
    let (ctx, event_loop) = &mut cb.build()?;
    let state = &mut MainState::new(ctx)?;
    event::run(ctx, event_loop, state)
}
Where resources/sprite_sheet.png is the sprite image (omitted here).
The part that makes the rotation happen around the center of the image is:
.offset(na::Point2::new(0.5, 0.5))
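For context on why that works: in ggez 0.5, DrawParam's offset is given as a fraction of the drawable's size, so (0.5, 0.5) puts the anchor used for rotation at the middle of the sprite. Applied to a plain image draw rather than a SpriteBatch, the same idea would look roughly like the sketch below; draw_rotated_about_center is a hypothetical helper mirroring the fields from the question:
use ggez::graphics::{self, DrawParam, Image};
use ggez::nalgebra as na;
use ggez::{Context, GameResult};

// Hypothetical helper: draw `image` at `pos`, rotated by `rotation` around
// its center rather than its top-left corner.
fn draw_rotated_about_center(
    ctx: &mut Context,
    image: &Image,
    pos: (f32, f32),
    rotation: f32,
) -> GameResult {
    graphics::draw(
        ctx,
        image,
        DrawParam::new()
            .dest(na::Point2::new(pos.0, pos.1))
            .rotation(rotation)
            // offset is a fraction of the image size; (0.5, 0.5) is the center
            .offset(na::Point2::new(0.5, 0.5)),
    )
}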