I would like to render a live wallpaper into the X root window. This is currently not possible directly through the API glutin exposes.
I found a post where the asker seems to have eventually figured it out: "How can I make a window override-redirect with glutin?".
However, it seems that the code snippet from the answer is not enough to achieve the desired effect: once the flag is set, the previously-created window disappears, but nothing is rendered to the root window. What am I missing?
Here's what I have so far:
let mut events_loop = glium::glutin::EventsLoop::new();
let context = glium::glutin::ContextBuilder::new();
let window = glium::glutin::WindowBuilder::new()
    .with_dimensions(800, 600)
    .with_title("TEST");
let display = glium::Display::new(window, context, &events_loop).unwrap();

unsafe {
    use glium::glutin::os::unix::WindowExt;
    use winit::os::unix::x11::XConnection;
    use winit::os::unix::x11::ffi::{Display, XID, CWOverrideRedirect, XSetWindowAttributes};

    let x_connection = std::sync::Arc::<XConnection>::into_raw(display.gl_window().get_xlib_xconnection().unwrap());
    let x_display = display.gl_window().get_xlib_display().unwrap() as *mut Display;
    let x_window = display.gl_window().get_xlib_window().unwrap() as XID;

    ((*x_connection).xlib.XChangeWindowAttributes)(
        x_display,
        x_window,
        CWOverrideRedirect,
        &mut XSetWindowAttributes {
            background_pixmap: 0,
            background_pixel: 0,
            border_pixmap: 0,
            border_pixel: 0,
            bit_gravity: 0,
            win_gravity: 0,
            backing_store: 0,
            backing_planes: 0,
            backing_pixel: 0,
            save_under: 0,
            event_mask: 0,
            do_not_propagate_mask: 0,
            override_redirect: 1,
            colormap: 0,
            cursor: 0,
        }
    );

    ((*x_connection).xlib.XUnmapWindow)(x_display, x_window);
    ((*x_connection).xlib.XMapWindow)(x_display, x_window);
}
I want to use the image and imageproc crates to draw triangles that blend where they overlap. Currently my code looks like this:
use image::{Rgba, RgbaImage};
use imageproc::drawing;
use imageproc::drawing::Blend;
use imageproc::point::Point;

struct Triangle([Point<i32>; 3]);

fn main() {
    let triangle = Triangle([
        Point { x: 0, y: 0 },
        Point { x: 100, y: 400 },
        Point { x: 400, y: 100 },
    ]);

    let mut image = Blend(RgbaImage::new(400, 400));
    drawing::draw_polygon_mut(&mut image, &triangle.0, Rgba([255, 255, 0, 255]));
    drawing::draw_polygon_mut(&mut image, &triangle.0, Rgba([0, 0, 255, 255]));
    image.save("test.png").unwrap();
}
This should produce a single white triangle, but there is no save method for Blend canvases. Is there an easy way to save this image?
The actual ImageBuffer object is inside of the Blend object.
You can extract it via either destructuring or .0.
Destructuring:
let Blend(image_buffer) = image;
image_buffer.save("test.png").unwrap();
.0:
image.0.save("test.png").unwrap();
I'm trying to follow along with the "hello compute" example from wgpu on Windows 10 (with some minor modifications, mainly gutting the shader so it does basically no actual computing), but when I read the buffer at the end, it's always zeroed out.
This is the shader I'm trying to run; it compiles fine and I think it's correct:
[[block]]
struct Numbers
{
    data: [[stride(4)]] array<u32>;
};

[[group(0), binding(0)]]
var<storage, read_write> numbers: Numbers;

[[stage(compute), workgroup_size(1)]]
fn main()
{
    numbers.data[0] = numbers.data[0] + u32(1);
    numbers.data[1] = numbers.data[1] + u32(1);
    numbers.data[2] = numbers.data[2] + u32(1);
}
As for the wgpu code, it follows the tutorial quite closely. First I get the instance, adapter, device, and queue:
let instance = Instance::new(Backends::PRIMARY);

let adapter = block_on(instance
    .request_adapter(&RequestAdapterOptions
    {
        power_preference: PowerPreference::default(),
        compatible_surface: None,
    }))
    .unwrap();

let (device, queue) = block_on(adapter
    .request_device(&Default::default(), None))
    .unwrap();
Compile the shader and make a pipeline:
let shader = device.create_shader_module(&ShaderModuleDescriptor
{
    label: Some("shader"),
    source: ShaderSource::Wgsl(shader_src.into()),
});

let pipeline = device.create_compute_pipeline(&ComputePipelineDescriptor
{
    label: None,
    layout: None,
    module: &shader,
    entry_point: "main",
});
Make the staging and storage buffers. The dbg!(size) prints 12, which should be correct for a 3-element array of 4-byte u32s.
let buffer = [1u32, 2, 3];
let size = std::mem::size_of_val(&buffer) as u64;
dbg!(size);

let staging_buffer = device.create_buffer(&BufferDescriptor
{
    label: None,
    size: size,
    usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
    mapped_at_creation: false,
});

let storage_buffer = device.create_buffer_init(&BufferInitDescriptor
{
    label: Some("storage buffer"),
    contents: cast_slice(&buffer),
    usage: BufferUsages::STORAGE
        | BufferUsages::COPY_DST
        | BufferUsages::COPY_SRC,
});
Set up the bind group:
let bg_layout = pipeline.get_bind_group_layout(0);
let bind_group = device.create_bind_group(&BindGroupDescriptor
{
    label: None,
    layout: &bg_layout,
    entries: &[BindGroupEntry
    {
        binding: 0,
        resource: storage_buffer.as_entire_binding(),
    }]
});
Get the encoder and create the compute pass. The copy_buffer_to_buffer should copy the storage buffer to the staging buffer so I can read it at the end.
let mut encoder = device.create_command_encoder(&CommandEncoderDescriptor
{
    label: None,
});

{
    let mut cpass = encoder.begin_compute_pass(&ComputePassDescriptor
    {
        label: None
    });
    cpass.set_pipeline(&pipeline);
    cpass.set_bind_group(0, &bind_group, &[]);
    cpass.dispatch(1, 1, 1);
}

encoder.copy_buffer_to_buffer(
    &storage_buffer, 0,
    &staging_buffer, 0,
    size);

queue.submit(Some(encoder.finish()));
Then I map the staging buffer and block for the result:
let buf_slice = staging_buffer.slice(..);
let buf_future = buf_slice.map_async(MapMode::Read);

device.poll(Maintain::Wait);

if let Ok(()) = block_on(buf_future)
{
    let data = buf_slice.get_mapped_range();
    let result = cast_slice::<u8, u32>(&data).to_vec();
    drop(data);
    staging_buffer.unmap();
    println!("{:?}", result);
}
else
{
    println!("error");
}
The error case isn't reached and the program terminates with no errors, but the printed result is always [0, 0, 0], when it should be [2, 3, 4].
What am I doing wrong?
The program works fine when I run it on my discrete graphics card, but wgpu is buggy on my integrated Intel HD Graphics 630, which is why the program appeared not to work.
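As a follow-up for anyone seeing the same symptom: it can help to check which adapter wgpu actually picked, and to explicitly ask for the high-performance (usually discrete) GPU. This is only a sketch against the same wgpu version as the snippets above; the power_preference hint is a request rather than a guarantee, and field names can differ slightly between wgpu releases.
let instance = Instance::new(Backends::PRIMARY);

// Prefer the discrete GPU so the buggy integrated adapter is less likely to be chosen.
let adapter = block_on(instance
    .request_adapter(&RequestAdapterOptions
    {
        power_preference: PowerPreference::HighPerformance,
        compatible_surface: None,
    }))
    .unwrap();

// Print which physical device was chosen, to confirm the Intel iGPU was not selected.
println!("using adapter: {:?}", adapter.get_info());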
I have about 10 objects that filters can be applied to, and a filter engine that goes through them one by one and applies filters to each.
import { fabric } from 'fabric'

const filters = {
  brightness: new fabric.Image.filters.Brightness(),
  premade: new fabric.Image.filters.ColorMatrix(),
}

const addFilter = (canvas, index, value = null) => {
  let pickedFilter
  let objects = canvas.getObjects()

  switch (index) {
    case 1:
      pickedFilter = sepia
      break;
    case 8:
      pickedFilter = brightness
      break;
    default:
      break;
  }

  objects.forEach((o) => {
    if (o.filters) {
      if (!o.filters[0]) {
        o.filters.push(filters.brightness)
        o.filters.push(filters.premade)
      }
      pickedFilter(o, value)
    }
  });

  canvas.renderAll();
}

export default addFilter

function sepia(o) {
  let value = [
    0.67, 0, 0, 0, 0,
    0, 0.54, 0, 0, 0,
    0, 0, 0.4, 0, 0,
    0, 0, 0, 1, 0
  ]
  delete filters.contrast.contrast
  delete filters.premade.matrix
  filters.premade.matrix = value
  o.applyFilters()
}

function brightness(o, value) {
  value = (value / 100) / 2
  filters.brightness.brightness = value
  o.applyFilters()
}
Initially, when objects don't have a filter at filters[0], I go ahead and push the filters.
Later on, when the user moves the slider (like in this GIF), I go through each object and, if it can have filters, change the filter's value and then apply the filters. As you can see it's a bit laggy, and I think the biggest problem is that the filters are applied to multiple objects.
In this GIF, I get rid of all objects but one and the performance feels noticeably better, but the problem is that I need to apply the filters to all of the objects.
I tried running the code inside the filter functions only once (for example, I moved the value assignment and the deletion of the previous value out of the sepia function, outside the loop), but it didn't change anything.
How can I improve the performance and make it less laggy?
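One generic mitigation worth trying (a sketch of mine, not something from the code above) is to debounce the slider callback so addFilter, and therefore applyFilters and renderAll, run at most every ~100 ms instead of on every slider tick. The 100 ms delay and the wrapper name are assumptions:
// Hypothetical wrapper around the addFilter above.
let pending = null
const addFilterDebounced = (canvas, index, value) => {
  clearTimeout(pending)
  pending = setTimeout(() => addFilter(canvas, index, value), 100)
}
The slider's input handler would then call addFilterDebounced instead of addFilter, so a fast drag collapses into one filter pass.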
I'm trying to get calibrateCamera working on my Node.js backend. The library is working fine, but I'm having trouble with the OpenCV functions not giving any error messages if I have faulty inputs. I'm kind of flying in the dark.
But that's beside the point. I've taken 17 images of the chessboard calibration pattern and got the code to detect the pattern in all of the images. Everything works just fine until I call cv.calibrateCamera(), probably because I don't know what to use as the required inputs for cameraMatrix and distCoeff (the 4th and 5th parameters). However, I can't be 100% sure this is the issue, because I don't get any error messages from the cv... functions.
I tried to follow the example at https://docs.opencv.org/3.1.0/dc/dbb/tutorial_py_calibration.html, but in the Python tutorial you can pass None for cameraMatrix and distCoeff. I tried using null, but that didn't work either.
Any help would be appreciated.
const size = new cv.Size(9, 6);
let mat = null;

let objpt = [];
for (let i = 0; i < 9; i++) {
  for (let j = 0; j < 6; j++) {
    objpt.push(cv.Point(2.5 * i, 2.5 * j, 0))
  }
}

let objectPoints = [];
let imagePoints = [];

for (let i = 0; i < 17; i++) {
  mat = cv.imread('./calib/calib' + (i + 1) + '.jpg');
  let smallmat = mat.resize(756, 1008);
  const corners = smallmat.findChessboardCorners(size);
  if (corners.returnValue) {
    objectPoints = objectPoints.concat(objpt);
    imagePoints = imagePoints.concat(corners.corners);
  }
}

// THIS IS WHERE EXECUTION JUST STOPS WITH NO ERROR MESSAGE
cv.calibrateCamera(
  objectPoints,
  imagePoints,
  new cv.Size(756, 1008),
  new cv.Mat(3, 3, cv.CV_32FC1, 0),
  [0, 0, 0, 0, 0]
);
According to the test, the parameters should be passed like this:
[_objectPoints, _objectPoints],
[imagePoints, imagePoints],
imageSize,
_cameraMatrix,
distCoefficients
where
const _cameraMatrix = new cv.Mat([
  [800, 0, 100],
  [0, 800, 100],
  [0, 0, 1]
], cv.CV_64F);
and
const distCoefficients = [0, 0.5, 1.0, 1.0];
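Applied to the code in the question, that shape suggests collecting one array of points per successfully detected image (push) rather than one flat concatenated array, and then passing a pre-filled camera matrix and distortion coefficients. A rough, untested sketch reusing the question's variable names:
// inside the loop over the 17 images: one entry per image, not a flat concat
if (corners.returnValue) {
  objectPoints.push(objpt);
  imagePoints.push(corners.corners);
}

// then calibrate, passing the matrix and coefficients in the shape shown above
const result = cv.calibrateCamera(
  objectPoints,
  imagePoints,
  new cv.Size(756, 1008),
  _cameraMatrix,
  distCoefficients
);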
I'm creating a program that uses glutin, and I want to provide a command-line flag to make the window override-redirect so it can be used as a desktop wallpaper for certain window managers that don't support the desktop window type.
I've done a lot of research and managed to cobble together a block of code that I thought would work, using the provided xlib display and window from glutin. Here is my existing code:
unsafe {
    use glutin::os::unix::WindowExt;

    let x_connection = std::sync::Arc::<glutin::os::unix::x11::XConnection>::into_raw(display.gl_window().get_xlib_xconnection().unwrap());

    ((*x_connection).xlib.XChangeWindowAttributes)(
        display.gl_window().get_xlib_display().unwrap() as *mut glutin::os::unix::x11::ffi::Display,
        display.gl_window().get_xlib_window().unwrap() as glutin::os::unix::x11::ffi::XID,
        glutin::os::unix::x11::ffi::CWOverrideRedirect,
        &mut glutin::os::unix::x11::ffi::XSetWindowAttributes {
            background_pixmap: 0,
            background_pixel: 0,
            border_pixmap: 0,
            border_pixel: 0,
            bit_gravity: 0,
            win_gravity: 0,
            backing_store: 0,
            backing_planes: 0,
            backing_pixel: 0,
            save_under: 0,
            event_mask: 0,
            do_not_propagate_mask: 0,
            override_redirect: 1,
            colormap: 0,
            cursor: 0,
        }
    );
}
It doesn't give me any errors, and it compiles and runs fine with the rest of the code, but it doesn't make the window override-redirect like I want it to.
I figured it out. The override-redirect flag only takes effect when the window is mapped, so if I unmap it and map it again, it works!
Here is the code now:
unsafe {
    use glutin::os::unix::WindowExt;
    use glutin::os::unix::x11::XConnection;
    use glutin::os::unix::x11::ffi::{Display, XID, CWOverrideRedirect, XSetWindowAttributes};

    let x_connection = std::sync::Arc::<XConnection>::into_raw(display.gl_window().get_xlib_xconnection().unwrap());
    let x_display = display.gl_window().get_xlib_display().unwrap() as *mut Display;
    let x_window = display.gl_window().get_xlib_window().unwrap() as XID;

    ((*x_connection).xlib.XChangeWindowAttributes)(
        x_display,
        x_window,
        CWOverrideRedirect,
        &mut XSetWindowAttributes {
            background_pixmap: 0,
            background_pixel: 0,
            border_pixmap: 0,
            border_pixel: 0,
            bit_gravity: 0,
            win_gravity: 0,
            backing_store: 0,
            backing_planes: 0,
            backing_pixel: 0,
            save_under: 0,
            event_mask: 0,
            do_not_propagate_mask: 0,
            override_redirect: 1,
            colormap: 0,
            cursor: 0,
        }
    );

    ((*x_connection).xlib.XUnmapWindow)(x_display, x_window);
    ((*x_connection).xlib.XMapWindow)(x_display, x_window);
}
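If the remap still does not take effect immediately, one extra step that may help (my own assumption, not part of the original answer) is to flush the Xlib request buffer right after re-mapping, since XUnmapWindow and XMapWindow only queue requests:
// hypothetical addition just before the closing brace of the unsafe block above
((*x_connection).xlib.XFlush)(x_display);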