rust struct: struct with generics as variable - rust

I have this, using rp2040_hal:
use defmt_rtt as _;
use embedded_hal::timer::CountDown;
use rp2040_hal::pio::StateMachineIndex;
use rp2040_hal::prelude::_rphal_pio_PIOExt;
use rp2040_hal::{gpio::PinId, timer::Timer, Clock};
use rp_pico::hal::clocks::ClocksManager;
use rp_pico::hal::pac::Peripherals;
use smart_leds::RGB8;
use ws2812_pio::{self, Ws2812};
/// Wrapper around a PIO-driven WS2812 LED strip plus a local pixel buffer.
///
/// NOTE(review): `C: embedded_hal::timer::CountDown` is too loose for
/// `ws2812_pio::Ws2812`, which requires the concrete rp2040-hal `CountDown`
/// type — this is the root cause discussed in the answer below.
pub struct LedControl<C, I, SM>
where
    C: CountDown,
    I: PinId + rp2040_hal::gpio::bank0::BankPinId,
    SM: StateMachineIndex,
{
    // Fixed-size pixel back buffer.
    leds: [RGB8; 100],
    // Logical strip length; presumably only the first `num_of_leds` entries are
    // meaningful — the field is never read in this snippet.
    num_of_leds: usize,
    // WS2812 driver bound to PIO0.
    ws: Ws2812<rp2040_hal::pac::PIO0, SM, C, I>,
}
impl<C, I, SM> LedControl<C, I, SM>
where
    C: CountDown,
    I: PinId + rp2040_hal::gpio::bank0::BankPinId,
    SM: StateMachineIndex,
{
    // Constructor as posted in the question.
    // NOTE(review): the final `LedControl { .. };` ends with a semicolon and the
    // function returns `()`, so the constructed value is dropped immediately.
    pub fn new(pins: rp2040_hal::gpio::Pins, clk: &ClocksManager, timer: &Timer, num: usize) {
        let mut pc = Peripherals::take().unwrap();
        // Split PIO0 into its state machines; sm0 drives the LED strip.
        let (mut pio, sm0, _, _, _) = pc.PIO0.split(&mut pc.RESETS);
        let strip = Ws2812::new(
            pins.gpio4.into_mode(),
            &mut pio,
            sm0,
            clk.peripheral_clock.freq(),
            timer.count_down(),
        );
        LedControl {
            num_of_leds: num,
            ws: strip,
            leds: [(0, 0, 0).into(); 100],
        };
    }
    // Sets one pixel in the local buffer (not yet pushed to the strip).
    pub fn modifyPixel(&mut self, rgb: &(u8, u8, u8), idx: &usize) {
        self.leds[idx.clone()] = (*rgb).into()
    }
    // Returns a copy of the full back buffer.
    pub fn pixels(&mut self) -> [RGB8; 100] {
        return self.leds.clone();
    }
    // NOTE(review): `unwarp` is a typo for `unwrap`, and `write()` needs the
    // `smart_leds::SmartLedsWrite` trait in scope — see the answer below.
    pub fn test_ws(&mut self) {
        self.ws.write(self.leds.iter().copied()).unwarp();
    }
}
with cargo.toml:
[package]
edition = "2021"
name = "rp2040-project-WS3823"
version = "0.1.0"
[dependencies]
cortex-m = "0.7"
cortex-m-rt = "0.7"
embedded-hal = { version = "0.2.5", features = ["unproven"] }
defmt = "0.3"
defmt-rtt = "0.4.0"
panic-probe = { version = "0.3", features = ["print-defmt"] }
# We're using a Pico by default on this template
rp-pico = "0.6.0"
# but you can use any BSP. Uncomment this to use the pro_micro_rp2040 BSP instead
# sparkfun-pro-micro-rp2040 = "0.3"
# If you're not going to use a Board Support Package you'll need these:
rp2040-hal = { version = "0.7.0", features = ["rt"] }
# rp2040-boot2 = "0.2"
smart-leds = "0.3.0"
ws2812-pio = "0.5.0"
fugit = "0.3.6"
# cargo build/run
[profile.dev]
codegen-units = 1
debug = 2
debug-assertions = true
incremental = false
opt-level = 3
overflow-checks = true
# cargo build/run --release
[profile.release]
codegen-units = 1
debug = 2
debug-assertions = false
incremental = false
lto = 'fat'
opt-level = 3
overflow-checks = false
# do not optimize proc-macro crates = faster builds from scratch
[profile.dev.build-override]
codegen-units = 8
debug = false
debug-assertions = false
opt-level = 0
overflow-checks = false
[profile.release.build-override]
codegen-units = 8
debug = false
debug-assertions = false
opt-level = 0
overflow-checks = false
# cargo test
[profile.test]
codegen-units = 1
debug = 2
debug-assertions = true
incremental = false
opt-level = 3
overflow-checks = true
# cargo test --release
[profile.bench]
codegen-units = 1
debug = 2
debug-assertions = false
incremental = false
lto = 'fat'
opt-level = 3
the error is:
self.ws.write(self.leds.iter().copied()).unwarp();
| ^^^^^ method not found in `Ws2812<rp2040_hal::rp2040_pac::PIO0, SM, C, I>`
which makes sense because this is a generic reference, not an implementation,
but how do I tell the compiler that I am using the implementation? ('strip' in the c-tor)
I am using the template for the pico pi: https://github.com/rp-rs/rp2040-project-template.git

There are two problems in your code:
SmartLedsWrite, which provides write(), is not in scope
SmartLedsWrite requires your timer to be specifically of type rp2040_hal::timer::CountDown, not of impl embedded_hal::timer::CountDown, as can be seen here.
The write() function is part of the SmartLedsWrite trait.
To use a trait in Rust, it has to be in scope.
You can bring it in scope by modifying the following line in your code:
use smart_leds::RGB8;
to:
use smart_leds::{SmartLedsWrite, RGB8};
Fixing the CountDown requires introducing a lifetime on your LedControl object and replacing the generic with an actual type.
This code compiles (not sure if it works, don't have the required hardware in front of me):
use defmt_rtt as _;
use rp2040_hal::pio::StateMachineIndex;
use rp2040_hal::prelude::_rphal_pio_PIOExt;
use rp2040_hal::timer::CountDown;
use rp2040_hal::{gpio::PinId, timer::Timer, Clock};
use rp_pico::hal::clocks::ClocksManager;
use rp_pico::hal::pac::Peripherals;
use smart_leds::{SmartLedsWrite, RGB8};
use ws2812_pio::{self, Ws2812};
/// Answer version: the timer generic is replaced by the concrete
/// `rp2040_hal::timer::CountDown<'t>`, which borrows the `Timer` for `'t`.
pub struct LedControl<'t, I, SM>
where
    I: PinId + rp2040_hal::gpio::bank0::BankPinId,
    SM: StateMachineIndex,
{
    // Fixed-size pixel back buffer.
    leds: [RGB8; 100],
    // Logical strip length; presumably only the first `num_of_leds` entries are
    // meaningful — the field is not read in this snippet.
    num_of_leds: usize,
    // WS2812 driver on PIO0, using the concrete rp2040-hal CountDown.
    ws: Ws2812<rp2040_hal::pac::PIO0, SM, CountDown<'t>, I>,
}
impl<'t, I, SM> LedControl<'t, I, SM>
where
    I: PinId + rp2040_hal::gpio::bank0::BankPinId,
    SM: StateMachineIndex,
{
    /// Builds the WS2812 driver on PIO0 / GPIO4 and returns the controller.
    ///
    /// Fix vs. the posted snippet: the constructed `LedControl` ended with a
    /// `;` and the function returned `()`, so the value was dropped on the
    /// spot. `new` now returns `Self`; `timer` must outlive the controller
    /// (`'t`) because `count_down()` borrows it.
    pub fn new(
        pins: rp2040_hal::gpio::Pins,
        clk: &ClocksManager,
        timer: &'t Timer,
        num: usize,
    ) -> Self {
        // `Peripherals::take()` succeeds at most once per program run.
        let mut pc = Peripherals::take().unwrap();
        // Split PIO0 into its state machines; sm0 drives the strip.
        let (mut pio, sm0, _, _, _) = pc.PIO0.split(&mut pc.RESETS);
        let strip = Ws2812::new(
            pins.gpio4.into_mode(),
            &mut pio,
            sm0,
            clk.peripheral_clock.freq(),
            timer.count_down(),
        );
        LedControl {
            num_of_leds: num,
            ws: strip,
            leds: [(0, 0, 0).into(); 100],
        }
    }

    /// Sets pixel `idx` in the local buffer (not yet pushed to the strip).
    /// Name kept for API compatibility; Rust convention would be `modify_pixel`.
    pub fn modifyPixel(&mut self, rgb: &(u8, u8, u8), idx: &usize) {
        self.leds[*idx] = (*rgb).into()
    }

    /// Returns a copy of the full pixel buffer (`RGB8` is `Copy`, so the
    /// array copies by value).
    pub fn pixels(&mut self) -> [RGB8; 100] {
        self.leds
    }

    /// Pushes the current buffer out to the strip.
    /// TODO(review): `num_of_leds` is never used — consider writing only
    /// `self.leds.iter().take(self.num_of_leds)` if partial frames are valid.
    pub fn test_ws(&mut self) {
        self.ws.write(self.leds.iter().copied()).unwrap();
    }
}

Related

What does DlsymWeak::initialize do in rust?

I have a program that calls libc::memchr many times. When profiling my code I see that
the function using the most time is DlsymWeak::initialize. initialize seem to be called by memchr, which is called by my wrapper:
/// Thin safe-looking wrapper over `libc::memchr`: finds the first occurrence
/// of `needle` in the `len` bytes starting at `ptr`, returning its byte
/// offset from `ptr`, or `None` if absent.
///
/// # Safety (caller contract)
/// `ptr` must be valid for reads of `len` bytes; this function cannot check
/// that, so it should arguably be `unsafe fn` — signature kept as posted.
pub fn memchr_libc_ptr(ptr: *const std::os::raw::c_void, len: usize, needle: u8) -> Option<usize> {
    // SAFETY: relies on the caller contract above.
    let res = unsafe { libc::memchr(ptr, needle as i32, len) };
    // Idiomatic null check instead of comparing against `0 as *mut _`.
    if res.is_null() {
        return None;
    }
    // SAFETY: memchr returns a pointer inside [ptr, ptr + len), i.e. the same
    // allocation as `ptr`, so `offset_from` is well-defined and non-negative.
    Some(unsafe { res.cast::<u8>().offset_from(ptr.cast::<u8>()) } as usize)
}
Furthermore, the initialize source code says that this method should be cold ("should only happen during first-time initialization"), but clearly it is being called much more often than that.
What is DlsymWeak::initialize?
Can I avoid all these calls to it?
This is running on MacOS 12.3.1 rustup 1.25.1 (bb60b1e89 2022-07-12), x86-64 with the following profile
[profile.release]
lto = true
codegen-units = 1
debug = true
panic = "abort"
overflow-checks = false
incremental = false

What is the equivalent working Rust code example for this WAT (WebAssembly Text) example?

The below WAT, adapted from a couple of Wasmtime examples, runs absolutely fine embedded in my application but what I thought was the equivalent Rust code fails with:
Running `target\debug\hello_world.exe`
Error: expected 5 imports, found 1
error: process didn't exit successfully: `target\debug\hello_world.exe` (exit code: 1)
Here's the working WAT:
;; Minimal guest module: imports exactly one host function ("host" "greet"),
;; exports "run" and a 1-page linear memory pre-seeded with the greeting bytes.
(module
  (import "host" "greet" (func $greet (param i32 i32)))
  (func (export "run")
    i32.const 4 ;; ptr
    i32.const 22 ;; len
    call $greet)
  (memory (export "memory") 1)
  (data (i32.const 4) "Calling back from WAT!")
)
And the failing Rust:
use std::ffi::CString;
#[link(wasm_import_module = "host")]
extern "C" {
    // Host-provided callback: (ptr, len) into this module's linear memory.
    fn greet(ptr: i32, len: i32);
}
// Greeting payload; its bytes are what the host reads back.
static GREETING: &str = "Calling back from Rust!";
// Exported entry point called by the embedder.
#[no_mangle]
pub extern "C" fn run() {
    let greeting = CString::new(GREETING).expect("contains null byte");
    let ptr = greeting.as_ptr();
    // NOTE(review): this forgets the *pointer* (a Copy value — a no-op), not
    // the CString; presumably `std::mem::forget(greeting)` was intended. As
    // written, `greeting` is still alive during the `greet` call below, but is
    // dropped when this function returns.
    std::mem::forget(ptr);
    unsafe {
        greet(ptr as i32, GREETING.len() as i32);
    }
}
My minimal example app that embeds the WASM modules:
use std::str;
use anyhow::Result;
use wasmtime::{Caller, Engine, Extern, Func, Instance, Module, Store};
/// Host-side state stored in the wasmtime `Store` and read/updated from the
/// `greet` host callback.
struct MyState {
    name: String,
    count: usize,
}
fn main() -> Result<()> {
    let engine = Engine::default();
    let code = include_bytes!("../hello.wat");
    let module = Module::new(&engine, code)?;
    let mut store = Store::new(
        &engine,
        MyState {
            name: "Hello, junglie85!".to_string(),
            count: 0,
        },
    );
    // Host callback: reads `len` bytes at `ptr` from the guest's exported
    // memory, prints them as UTF-8, and bumps the call counter.
    let greet_func = Func::wrap(
        &mut store,
        |mut caller: Caller<'_, MyState>, ptr: i32, len: i32| {
            let mem = match caller.get_export("memory") {
                Some(Extern::Memory(mem)) => mem,
                _ => anyhow::bail!("failed to find host memory"),
            };
            // Bounds-checked slice of guest memory [ptr, ptr + len).
            let data = mem
                .data(&caller)
                .get(ptr as u32 as usize..)
                .and_then(|arr| arr.get(..len as u32 as usize));
            let string = match data {
                Some(data) => match str::from_utf8(data) {
                    Ok(s) => s,
                    Err(_) => anyhow::bail!("invalid utf-8"),
                },
                None => anyhow::bail!("pointer/length out of bounds"),
            };
            println!("> {} {}", caller.data().name, string);
            caller.data_mut().count += 1;
            Ok(())
        },
    );
    // NOTE(review): only one import is supplied; the "expected 5 imports"
    // error in the question suggests the Rust-built guest imports more
    // (presumably WASI) — confirm the guest's compile target.
    let imports = [greet_func.into()];
    let instance = Instance::new(&mut store, &module, &imports)?;
    let run = instance.get_typed_func::<(), ()>(&mut store, "run")?;
    println!("# Global count = {}", store.data().count);
    run.call(&mut store, ())?;
    println!("# Global count = {}", store.data().count);
    Ok(())
}
What is the Rust equivalent of the WAT example?

Rust Vulkano creation of compute pipeline does not work and freeze my video card

I'm trying to load a shader onto an AMD video card. After all the buffers are created, I try to create a new compute pipeline. As I started to debug it by printing messages, I found out that "Finished Creating the compute pipeline" is never printed. When I run it with `cargo run --release` it prints "Creating pipeline with shader", but after some seconds it freezes my whole computer and I have to turn it off and back on again...
My Vulkano version is: 0.32.1;
My vulkano-shaders version is: 0.32.1;
My Video Card is: AMD RX570 4GB
Vulkano Physical device properties:
buffer_image_granularity: 64,
compute_units_per_shader_array: Some(
8,
),
conformance_version: Some(
1.2.0,
),
Cargo.toml:
[package]
name = "vulkano_matrix"
version = "0.1.0"
edition = "2021"
[dependencies]
vulkano = "0.32.1"
vulkano-shaders = "0.32.0"
rand = "0.8.4"
nalgebra="0.31.4"
colored = "2.0.0"
bytemuck = "1.12.3"
// main.rs
extern crate nalgebra as na;
use bytemuck::{Pod, Zeroable};
use colored::Colorize;
use na::{dmatrix, DMatrix};
use std::{
io::{stdin, stdout, Write},
sync::Arc,
time::Instant,
};
use vulkano::{
buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer},
command_buffer::{
allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
AutoCommandBufferBuilder, PrimaryAutoCommandBuffer, PrimaryCommandBufferAbstract,
},
descriptor_set::{
allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
},
device::{
physical::PhysicalDevice, Device, DeviceCreateInfo, DeviceExtensions, Features,
QueueCreateInfo, QueueFlags,
},
instance::{Instance, InstanceCreateInfo},
memory::allocator::{MemoryAllocator, StandardMemoryAllocator},
pipeline::Pipeline,
pipeline::{ComputePipeline, PipelineBindPoint},
sync::GpuFuture,
VulkanLibrary,
};
/// Convolution padding strategy.
#[derive(Clone, Copy)]
pub enum Padding {
    /// No padding.
    None,
    /// Explicit fixed (rows, columns) padding.
    Fixed(usize, usize),
    /// Padding computed from input/kernel shapes ("same"-style; see the
    /// formula in `main`).
    Same,
}
/// Shape of a matrix buffer as seen by the shader.
/// NOTE(review): fields are `usize` (8 bytes on a 64-bit host) while the GLSL
/// `Dimension` struct uses `uint` (4 bytes) — the C layout likely disagrees
/// with the shader's; confirm.
#[repr(C)]
#[derive(Default, Copy, Clone, Debug, Zeroable, Pod)]
struct Dimension {
    pub rows: usize,
    pub columns: usize,
    pub channels: usize,
}
impl Dimension {
    /// Builds a single-channel `Dimension` from a matrix's (rows, columns)
    /// shape.
    pub fn from_matrix<T>(mat: &DMatrix<T>) -> Self {
        let (rows, columns) = mat.shape();
        Self {
            rows,
            columns,
            channels: 1,
        }
    }
}
/// Uniform payload mirroring the shader's `Dimensions` block
/// (set=0, binding=0): input, kernel, and output shapes.
#[repr(C)]
#[derive(Default, Copy, Clone, Debug, Zeroable, Pod)]
struct BufferDimensions {
    pub input_matrix: Dimension,
    pub kernel: Dimension,
    pub output_matrix: Dimension,
}
/// Uniform payload mirroring the shader's `Options` block
/// (set=0, binding=4): padding as (rows, columns) and the stride.
#[repr(C)]
#[derive(Default, Copy, Clone, Debug, Zeroable, Pod)]
struct ConvolutionOptions {
    pub padding: [i32; 2],
    pub stride: u32,
}
fn input(question: impl Into<String>) -> String {
let mut result = "".to_string();
print!("{} ", question.into().bold().cyan());
stdout().flush().expect("Could not flush stdout");
stdin()
.read_line(&mut result)
.expect("Could not read stdin");
result
}
fn main() {
    let library = VulkanLibrary::new().expect("Could not find vulkan.dll");
    let instance =
        Instance::new(library, InstanceCreateInfo::default()).expect("Failed to Create Instance");
    println!("Available GPUs:");
    // Enumerate devices, printing an indexed menu while collecting the handles.
    let physical_devices = instance
        .enumerate_physical_devices()
        .expect("Could not enumerate the physical devices")
        .enumerate()
        .map(|(i, physical)| {
            println!(
                "[{}]: \"{}\"; TYPE: \"{:?}\"; API_VERSION: \"{}\"",
                i.to_string().bold().bright_magenta(),
                physical.properties().device_name.to_string().bold().green(),
                physical.properties().device_type,
                physical.api_version()
            );
            physical
        })
        .collect::<Vec<Arc<PhysicalDevice>>>();
    // Ask the user which device to use.
    let physical_index = input(format!("Type the chosen [{}]:", "index".bright_magenta()))
        .replace("\n", "")
        .parse::<usize>()
        .expect("Please type a number.");
    let physical = physical_devices[physical_index].clone();
    println!(
        "Using {}; TYPE: \"{:?}\"; \n\n {:?} \n\n {:#?}",
        physical.properties().device_name.to_string().bold().green(),
        physical.properties().device_type,
        physical.api_version(),
        physical.properties()
    );
    // NOTE(review): early `return` (debugging leftover?) — everything below is
    // unreachable as posted.
    return;
    // Pick the first queue family that supports compute.
    let queue_family_index = physical
        .queue_family_properties()
        .iter()
        .position(|q| {
            q.queue_flags.intersects(&QueueFlags {
                compute: true,
                ..QueueFlags::empty()
            })
        })
        .unwrap() as u32;
    let (device, mut queues) = Device::new(
        physical,
        DeviceCreateInfo {
            enabled_features: Features::empty(),
            queue_create_infos: vec![QueueCreateInfo {
                queue_family_index,
                ..Default::default()
            }],
            ..Default::default()
        },
    )
    .expect("Failed to create device");
    let queue = queues.next().unwrap();
    let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
    let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
    let command_buffer_allocator =
        StandardCommandBufferAllocator::new(device.clone(), Default::default());
    // One-shot command buffer used to record the device-local buffer uploads.
    let mut builder = AutoCommandBufferBuilder::primary(
        &command_buffer_allocator,
        queue.queue_family_index(),
        vulkano::command_buffer::CommandBufferUsage::OneTimeSubmit,
    )
    .unwrap();
    let stride = 1;
    // Standard convolution output size: (in + 2*pad - kernel) / stride + 1.
    let get_result_shape = |input_shape: usize, padding: usize, ker_shape: usize| {
        (input_shape + 2 * padding - ker_shape) / stride + 1
    };
    let padding = Padding::Same;
    let input_data = dmatrix![1.0f32, 2., 3.; 4., 5., 6.; 7., 8., 9.];
    let kernel_data = dmatrix![11.0f32, 19.; 31., 55.];
    let input_shape = Dimension::from_matrix(&input_data);
    let kernel_shape = Dimension::from_matrix(&kernel_data);
    // Resolve the padding strategy to concrete (rows, columns) amounts.
    let padding = match padding {
        Padding::None => (0, 0),
        Padding::Fixed(x_p, y_p) => (x_p, y_p),
        Padding::Same => {
            let get_padding = |input_shape: usize, ker_shape: usize| {
                (((stride - 1) as i64 * input_shape as i64 - stride as i64 + ker_shape as i64)
                    as f64
                    / 2.0)
                    .ceil() as usize
            };
            (
                /* rows */
                get_padding(input_shape.rows, kernel_shape.rows),
                /* columns */
                get_padding(input_shape.columns, kernel_shape.columns),
            )
        }
    };
    let dimensions = BufferDimensions {
        input_matrix: input_shape,
        kernel: kernel_shape,
        output_matrix: Dimension {
            rows: get_result_shape(input_shape.rows, padding.0, kernel_shape.rows),
            columns: get_result_shape(input_shape.columns, padding.1, kernel_shape.columns),
            channels: 1,
        },
    };
    let options = ConvolutionOptions {
        padding: [padding.0 as i32, padding.1 as i32],
        stride: stride as u32,
    };
    // NOTE(review): the shader declares these bindings as storage (`buffer`)
    // blocks, but the buffers below are created with `uniform_buffer` usage —
    // confirm the intended descriptor types.
    let dimensions_buffer = DeviceLocalBuffer::from_data(
        &memory_allocator,
        dimensions,
        BufferUsage {
            uniform_buffer: true,
            ..BufferUsage::empty()
        },
        &mut builder,
    )
    .expect("Failed to create uniform buffer.");
    let options_buffer = DeviceLocalBuffer::from_data(
        &memory_allocator,
        options,
        BufferUsage {
            uniform_buffer: true,
            ..BufferUsage::empty()
        },
        &mut builder,
    )
    .expect("Failed to create uniform buffer.");
    println!(
        "{:?} {:?} {:?} {:?}",
        input_data, dimensions, options, kernel_data
    );
    let input_buffer = DeviceLocalBuffer::from_iter(
        &memory_allocator,
        input_data.data.as_vec().to_owned(),
        BufferUsage {
            uniform_buffer: true,
            ..BufferUsage::empty()
        },
        &mut builder,
    )
    .expect("Failed to create uniform buffer.");
    let kernel_buffer = DeviceLocalBuffer::from_iter(
        &memory_allocator,
        kernel_data.data.as_vec().to_owned(),
        BufferUsage {
            uniform_buffer: true,
            ..BufferUsage::empty()
        },
        &mut builder,
    )
    .expect("Failed to create uniform buffer.");
    // Host-visible output buffer, zero-initialized to the output element count.
    let output_buffer = CpuAccessibleBuffer::from_iter(
        &memory_allocator,
        BufferUsage {
            storage_buffer: true,
            ..BufferUsage::empty()
        },
        false,
        [0..(dimensions.output_matrix.channels
            * dimensions.output_matrix.rows
            * dimensions.output_matrix.columns)]
            .map(|__| 0.0f32)
            .to_owned(),
    )
    .expect("Failed to create storage buffer.");
    println!("Loading shader");
    let cs = cs::load(device.clone()).unwrap();
    println!("Creating pipeline with shader"); // This line prints just fine
    // NOTE(review): this is where the reported freeze happens; per the
    // resolution below, the shader's work-group size exceeds the device's
    // max_compute_work_group_invocations limit.
    let compute_pipeline = ComputePipeline::new(
        device.clone(),
        cs.entry_point("main").unwrap(),
        &(),
        None,
        |_| {},
    )
    .expect("Failed to create compute shader");
    println!("Finished Creating the compute pipeline"); // THIS LINE NEVER GETS RUN
}
/// Compute-shader module; `vulkano_shaders::shader!` compiles the GLSL at
/// build time and generates the `load` function used in `main`.
pub mod cs {
    use vulkano_shaders::shader;
    shader! {
        ty: "compute",
        path: "./matrix_convolution.glsl"
    }
}
The shader is:
#version 450
#pragma shader_stage(compute)
// NOTE(review): 32 * 32 * 16 = 16384 invocations per work group — per the
// resolution below, this must not exceed the device's
// max_compute_work_group_invocations, which is why pipeline creation fails.
layout(local_size_x=32, local_size_y=32, local_size_z=16) in;
// Mirrors the Rust `Dimension` struct (note: the Rust side uses 64-bit usize).
struct Dimension {
    uint rows;
    uint columns;
    uint channels;
};
layout(set=0, binding=0) buffer Dimensions {
    Dimension input_matrix;
    Dimension kernel;
    Dimension output_matrix;
} dims_buf;
layout(set=0, binding=1) buffer readonly InputMatrix {
    float[] input_matrix;
};
layout(set=0, binding=2) buffer readonly Kernel {
    float[] kernel;
};
layout(set=0, binding=3) buffer writeonly OutputMatrix {
    float[] output_matrix;
};
layout(set=0, binding=4) buffer Options {
    ivec2 padding;
    uint stride;
} options_buf;
void main() {
    // Only reads the invocation id so far; the convolution body is not
    // written yet in this snippet.
    const uint raw_row = gl_GlobalInvocationID.x;
    const uint raw_column = gl_GlobalInvocationID.y;
    const uint raw_channel = gl_GlobalInvocationID.z;
}
I tried to run similar programs with different shaders and it worked just fine.
It turns out that the total work group size must not exceed the device limit `physical.properties().max_compute_work_group_invocations`.
Therefore local_size_x * local_size_y * local_size_z must be less than or equal to max_compute_work_group_invocations.

Can't import my crate into criterion benchmark

I'm trying to use the criterion crate to benchmark a function in my binary crate.
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use rand::Rng;
use enigma::enigma::Enigma; // failed to resolve: use of undeclared crate or module `enigma` use of undeclared crate or module `enigma`rustcE0433
/// Returns a random `n`-character string of uppercase ASCII letters.
fn gen_rand_string(n: usize) -> String {
    const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    let mut rng = rand::thread_rng();
    let mut s = String::with_capacity(n);
    for _ in 0..n {
        let idx = rng.gen_range(0..CHARSET.len());
        s.push(CHARSET[idx] as char);
    }
    s
}
// Lots of red squigglies because of imports
// Builds a fixed Enigma instance for benchmarking.
// NOTE(review): `RotorConfig`, `Rotors`, `Plugs`, `Plugboard` and `Reflectors`
// are not imported here — this is the unresolved-import problem described in
// the question text below.
fn construct_enigma() -> Enigma {
    let rotors: RotorConfig =
        RotorConfig::try_from([(Rotors::I, 'A'), (Rotors::II, 'X'), (Rotors::IV, 'N')])
            .unwrap();
    let plugs = Plugs::try_from(vec![]).unwrap();
    let plugboard: Plugboard = Plugboard::try_from(plugs).unwrap();
    let reflector: Reflectors = Reflectors::B;
    Enigma::new(rotors, plugboard, reflector)
}
// Criterion entry point: benchmarks encoding a 1000-character random string.
fn criterion_benchmark(c:&mut Criterion){
    let e = construct_enigma();
    let s1000 = gen_rand_string(1000);
    // NOTE(review): the closure body is incomplete (`b.iter(||)`) — the
    // snippet is unfinished as posted.
    c.bench_function("ENC 1000", |b|b.iter(||))
}
Here's my cargo.toml
[package]
name = "enigma"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.65"
bimap = "0.6.2"
bruh_moment = "0.1.1"
itertools = "0.10.3"
log = "0.4.17"
rand = "0.8.5"
strum = "0.24.1"
strum_macros = "0.24.3"
thiserror = "1.0.37"
[dev-dependencies]
criterion = "0.4.0"
[[bench]]
name = "encode_string"
harness = false
By my understanding, I should be importing my function using %mycratename%::foo::bar instead of crate::foo::bar, but I'm getting an error saying that my crate can't be found. How do I get Rust to recognize my local unpublished crate for criterion benchmarks?

Rust function name (caller) or any other context inside macro

Is there a way in Rust to get the "calling" function name or any other contextual information inside a macro?
Example:
#[macro_export]
macro_rules! somemacro {
( $x:expr ) => {
{
// access function name (etc..) that called this macro
}
};
}
This can be done using a procedural macro:
extern crate proc_macro;
use proc_macro::TokenStream;
/// Attribute macro that injects `const THIS_FN: &str = "<fn name>";` as the
/// first statement of the annotated function, so its body can read its own
/// name at runtime.
#[proc_macro_attribute]
pub fn with_name(_: TokenStream, item: TokenStream) -> TokenStream {
    let mut input = syn::parse_macro_input!(item as syn::ItemFn);
    // syn 0.15 API: the function's name lives directly on `ItemFn::ident`.
    let fn_name = input.ident.to_string();
    let const_decl = quote::quote! {
        const THIS_FN: &str = #fn_name;
    };
    // Prepend the generated const to the function body.
    input.block.stmts.insert(0, syn::parse(const_decl.into()).unwrap());
    // Re-emit the (modified) function.
    let output = quote::quote! {
        #input
    };
    output.into()
}
Cargo.toml:
[package]
name = "with_name"
version = "0.1.0"
edition = "2018"
[lib]
proc-macro = true
[dependencies]
quote = "0.6.12"
syn = { version = "0.15.37", features = ["full"] }
Which can be used as:
// Usage example: the attribute injects THIS_FN into `foo`'s body.
#[with_name::with_name]
fn foo() {
    println!("Name: {}", THIS_FN);
}
fn main() {
    foo();
}
Also note that if you only care about the module, there is a built-in macro for that:
mod test {
    pub fn foo() {
        // module_path!() expands to the current module path, e.g.
        // "crate_name::test".
        println!("Module: {}", module_path!());
    }
}
fn main() {
    test::foo();
}
(link to playground)

Resources