Simple server - threads are left open when trying to stream a file - multithreading

I'm fairly new to Rust and I'm trying to learn it by doing www.rust-class.org. In one of the assignments I have to implement a simple web server. Most of the code on GitHub targets v0.9, so I had to rewrite some things. Anyway:
The web server code is below, but I don't expect you to read everything, so further down I highlight the part where the problem occurs.
use std::io::*;
use std::io::net::ip::SocketAddr;
use std::{str, os};
use std::sync::{Mutex, Arc, Semaphore};
use std::path::{Path, PosixPath};
use std::io::fs::PathExtensions;
use std::collections::{BinaryHeap, HashMap};
use std::io::timer::sleep;
use std::time::duration::Duration;
static CONTENT_TYPE_HTML: &'static str = "Content-Type: text/html; charset=UTF-8\r\n\r\n";
static HTTP_SUCCESS: &'static str = "HTTP/1.1 200 OK\r\n";
static HTTP_NOT_FOUND: &'static str = "HTTP/1.1 404 OK\r\n";
static START_COUNTER_STYLE: &'static str = "
<doctype !html><html>
<head>
<title>Hello, Rust!</title>
<style>
body { background-color: #884414; color: #FFEEAA}
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red }
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green }
</style>
</head>
<body>";
static END_COUNTER_STYLE: &'static str = "</body></html>\r\n";
static FILE_CHUNK: uint = 8192;
static MAX_CONCURRENCY: int = 4;
#[deriving(PartialEq, Eq)]
struct HTTPRequest {
peer_name: SocketAddr,
path: PosixPath,
file_size: uint,
priority: uint
}
impl PartialOrd for HTTPRequest {
fn partial_cmp(&self, other: &HTTPRequest) -> Option<Ordering> {
// Comparison is reversed to make PriorityQueue behave like a min-heap
(self.priority).partial_cmp(&other.priority)
}
}
impl Ord for HTTPRequest {
fn cmp(&self, other: &HTTPRequest) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
pub struct WebServer {
port: uint,
ip_str: String,
request_queue_arc: Arc<Mutex<BinaryHeap<HTTPRequest>>>,
stream_map_arc: Arc<Mutex<HashMap<SocketAddr, Result<net::tcp::TcpStream, IoError>>>>,
notify_sender: Sender<()>,
notify_recv: Receiver<()>,
www_dir_path: Path,
concurrency_limit: Arc<Semaphore>,
}
impl WebServer {
pub fn new(ip_str: String, port: uint, www_dir_str: String) -> WebServer {
let (notify_sender, notify_recv) = channel();
let www_dir_path = Path::new(www_dir_str);
debug!("I'm serving server from directory: {}", www_dir_path.display());
WebServer {
ip_str: ip_str,
port: port,
www_dir_path: www_dir_path,
request_queue_arc: Arc::new(Mutex::new(BinaryHeap::new())),
stream_map_arc: Arc::new(Mutex::new(HashMap::new())),
notify_sender: notify_sender,
notify_recv: notify_recv,
concurrency_limit: Arc::new(Semaphore::new(MAX_CONCURRENCY))
}
}
pub fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
pub fn listen(&mut self) {
let addr = from_str::<SocketAddr>(format!("{}:{}", self.ip_str, self.port).as_slice()).expect("Address error.");
let stream_map_arc = self.stream_map_arc.clone();
let notify_sender = self.notify_sender.clone();
let request_queue_arc = self.request_queue_arc.clone();
let www_dir_path = self.www_dir_path.clone();
spawn(proc(){
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println!("Listening on {}", addr);
let mut requests_counter: uint = 0;
for stream in acceptor.incoming() {
match stream.clone() {
Ok(mut res) => { res.set_timeout(Some(1000*10)); },
Err(why) => { panic!("Couldn't set timeout for stream: {}", why.desc); }
}
requests_counter += 1;
let stream_map_arc = stream_map_arc.clone();
let notify_sender = notify_sender.clone();
let request_queue_arc = request_queue_arc.clone();
let www_dir_path = www_dir_path.clone();
spawn(proc() {
let mut stream = stream;
let mut buf = [0, ..500];
stream.read(&mut buf);
let request_str = str::from_utf8(buf.as_slice());
debug!("Request:\n{}", request_str);
let peer_name: SocketAddr = WebServer::peer_name(stream.clone());
match WebServer::get_request_path(www_dir_path.clone(), buf) {
Ok(request_path) => {
let extension = match request_path.extension_str() {
Some(ext) => ext,
None => ""
};
debug!("Requested path :\n{}", request_path.as_str());
debug!("Extension :\n{}",extension);
if request_path.as_str().expect("Request path err") == "www" {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream, requests_counter);
debug!("=====Terminated connection from [{}:{}].=====", peer_name.ip, peer_name.port);
} else if request_path.is_file() && (extension == "html" || extension == "bin") {
debug!("===== Static page request =====");
WebServer::enqueue_static_file_request(
stream,
request_path.clone(),
peer_name,
stream_map_arc,
request_queue_arc,
notify_sender
);
} else if request_path.is_file() && extension == "html" {
debug!("===== Dynamic page request =====");
// WebServer::respond_with_dynamic_page(stream, request_path.clone());
debug!("=====Terminated connection from [{}:{}].=====", peer_name.ip, peer_name.port);
} else {
debug!("===== Respond with error page =====");
WebServer::respond_with_error_page(stream);
debug!("=====Terminated connection from [{}:{}].=====", peer_name.ip, peer_name.port);
}
},
Err(_) => {
debug!("===== Respond with error page =====");
WebServer::respond_with_error_page(stream);
debug!("=====Terminated connection from [{}:{}].=====", peer_name.ip, peer_name.port);
}
}
})
}
});
}
fn respond_with_counter_page(stream: Result<net::tcp::TcpStream, IoError>, requests_counter: uint) {
WebServer::force_write(stream.clone(), format!("{}{}{}Requests:{}{}",
HTTP_SUCCESS,
CONTENT_TYPE_HTML,
START_COUNTER_STYLE,
requests_counter,
END_COUNTER_STYLE
).as_bytes())
}
fn respond_with_error_page(stream: Result<net::tcp::TcpStream, IoError>) {
WebServer::force_write(stream.clone(), HTTP_NOT_FOUND.as_bytes());
}
// fn respond_with_dynamic_page(stream: Result<net::tcp::TcpStream, IoError>, request_path: Path) {
// // WebServer::respond_with_static_page(stream, request_path);
// }
// TODO: Application-layer file caching.
fn respond_with_static_page(stream: net::tcp::TcpStream, request_path: Path) {
let mut stream = stream;
let mut file = match File::open(&request_path) {
Err(why) => {
debug!("File couln't be opened because: {} kind: {}", why.desc, why.kind);
return;
},
Ok(f) => { f }
};
stream.write(HTTP_SUCCESS.as_bytes());
stream.write(CONTENT_TYPE_HTML.as_bytes());
loop {
let mut buf = vec!();
match file.push_at_least(FILE_CHUNK, FILE_CHUNK, &mut buf) {
Err(why) => {
debug!("File reading problem: {}, {}", why.kind, why.desc)
if buf.len() > 0 {
stream.write(buf.as_slice());
}
return;
},
Ok(read_bytes_size) => {
match stream.write(buf.as_slice()) {
Err(why) => {
debug!("Stream broken: desc: {}, kind: {}", why.desc, why.kind);
return;
},
Ok(_) => {}
}
}
}
}
}
fn enqueue_static_file_request(
stream: Result<net::tcp::TcpStream, IoError>,
request_path: Path,
peer_name: SocketAddr,
stream_map_arc: Arc<Mutex<HashMap<SocketAddr, Result<net::tcp::TcpStream, IoError>>>>,
request_queue_arc: Arc<Mutex<BinaryHeap<HTTPRequest>>>,
notify_sender: Sender<()>,
) {
debug!("Enqueuing static file, waiting for streams lock... to stream: {}", request_path.display());
let mut local_map = stream_map_arc.lock();
local_map.insert(peer_name.clone(), stream);
debug!("Enqueuing static file, waiting for requests lock...");
let mut local_req = request_queue_arc.lock();
local_req.push(
HTTPRequest {
peer_name: peer_name.clone(),
path: request_path.clone(),
file_size: 1,
priority: 1
});
println!("enqueue_static_file_request: request_queue_arc length: {}", local_req.len());
println!("enqueue_static_file_request: stream_map_arc length: {}", local_map.len());
notify_sender.send(());
}
fn dequeue_static_file_request(&mut self) {
let stream_map_arc = self.stream_map_arc.clone();
let request_queue_arc = self.request_queue_arc.clone();
loop {
debug!("Waiting for requests!");
self.notify_recv.recv();
debug!("Dequeuing static file, waiting for requests lock...");
let mut local_req = request_queue_arc.lock();
match local_req.pop() {
Some(request) => {
println!("dequeue_static_file_request, request_queue_arc length: {}, {}",
local_req.len(),
&request.path.display()
);
debug!("Dequeuing static file, waiting for streams lock...");
let mut local_map = stream_map_arc.lock();
println!("dequeue_static_file_request, stream_map_arc length: {}", local_map.len());
match local_map.remove(&request.peer_name) {
None => { },
Some(stream) => match stream {
Ok(res) => {
self.concurrency_limit.acquire();
let child_concurrency_limit = self.concurrency_limit.clone();
let res = res.clone();
spawn(proc(){
WebServer::respond_with_static_page(res, request.path);
child_concurrency_limit.release();
debug!("=====Terminated connection from [{}:{}].=====",
&request.peer_name.ip, &request.peer_name.port
);
});
},
Err(_) => {
debug!("Stream had broken in the meantime.");
}
}
}
}
None => {}
}
}
}
fn peer_name(stream: Result<net::tcp::TcpStream, IoError>) -> net::ip::SocketAddr {
match stream {
Err(_) => panic!("Stream broken # peer_name"),
Ok(res) => {
match res.clone().peer_name() {
Ok(addr) => addr,
Err(_) => panic!("Couldn't obtain peername from stream")
}
}
}
}
fn get_request_path(root: Path, buf: [u8, ..500]) -> Result<Path, &'static str> {
match str::from_utf8(buf.as_slice()) {
Some(request_str) => {
let request_headers: Vec<&str> = request_str.splitn(3, ' ').collect();
if request_headers.len() == 4 {
Ok(root.join(Path::new(format!("./{}", request_headers[1]))))
} else {
Err("Bad headers")
}
},
None => {
Err("Empty headers")
}
}
}
fn force_write(stream: Result<net::tcp::TcpStream, IoError>, content: &[u8]) {
let mut stream = stream;
match stream.as_mut() {
Err(_) => { debug!("Well. I wanted to safetly write to a... BROKEN stream."); },
Ok(res) => {
// ?? wtf
match res.write(content) {
Err(_) => { },
Ok(_) => { }
}
}
}
}
}
The problem is that when I try to stream a file, threads are left open (because the streams are not freed?) and RAM usage slowly increases. I've narrowed it down to the streaming part: for instance, if I put sleep(Duration::seconds(2)) there instead of streaming the file, the queue grows and over time goes back to zero. That doesn't happen when I actually try to stream a file.
Clean state: (screenshot omitted)
After running httperf: (screenshot omitted)
Future requests then wait forever for the streams lock. I've tried hunting for infinite loops, but without success - everything else seems to work fine.
Do you have any suggestions as to what might cause this behavior?

You are missing the "Content-Length" header. Assuming the content of the file is loaded into body (or you can get the length some other way), try:
stream.write(HTTP_SUCCESS.as_bytes());
stream.write(CONTENT_TYPE_HTML.as_bytes());
stream.write(format!("Content-Length: {}\r\n", body.len()).as_bytes()).unwrap();
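Since respond_with_static_page streams the file in chunks rather than loading the whole body, one way to get the length up front is to stat the file first. A rough, untested sketch against the pre-1.0 APIs the question already uses (it assumes PathExtensions::stat() reports the size; write_response_headers is a hypothetical helper):
fn write_response_headers(stream: &mut net::tcp::TcpStream, request_path: &Path) {
    // Hypothetical helper: stat() the file to learn its size before streaming it.
    let file_size = request_path.stat().map(|s| s.size).unwrap_or(0);
    let _ = stream.write(HTTP_SUCCESS.as_bytes());
    let _ = stream.write(format!("Content-Length: {}\r\n", file_size).as_bytes());
    // CONTENT_TYPE_HTML already ends with the blank line that terminates the header block.
    let _ = stream.write(CONTENT_TYPE_HTML.as_bytes());
}
With HTTP/1.1, a response without Content-Length (and without chunked encoding or closing the connection) leaves the client unable to tell where the body ends, which fits the connections that never get released.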

Related

How to add partitions in Kafka Rust configuration

I want to configure this file to add a number-of-partitions option. By default it creates only 1 partition, but I need 10 for my data.
I don't have much knowledge of the rdkafka library in Rust, as I am directly using this plugin file.
Can anyone guide me to where I can find a solution to this, or what direction to take?
Thanks
use rdkafka::error::{KafkaError};
use rdkafka::{ClientConfig};
use rdkafka::producer::{FutureProducer, FutureRecord};
use std::fmt::Error;
use std::os::raw::{c_char, c_int, c_void};
use std::sync::mpsc::TrySendError;
use suricata::conf::ConfNode;
use suricata::{SCLogError, SCLogNotice};
const DEFAULT_BUFFER_SIZE: &str = "65535";
const DEFAULT_CLIENT_ID: &str = "rdkafka";
#[derive(Debug, Clone)]
struct ProducerConfig {
brokers: String,
topic: String,
client_id: String,
buffer: usize,
}
impl ProducerConfig {
fn new(conf: &ConfNode) -> Result<Self,Error> {
let brokers = if let Some(val) = conf.get_child_value("brokers"){
val.to_string()
}else {
SCLogError!("brokers parameter required!");
panic!();
};
let topic = if let Some(val) = conf.get_child_value("topic"){
val.to_string()
}else {
SCLogError!("topic parameter required!");
panic!();
};
let client_id = conf.get_child_value("client-id").unwrap_or(DEFAULT_CLIENT_ID);
let buffer_size = match conf
.get_child_value("buffer-size")
.unwrap_or(DEFAULT_BUFFER_SIZE)
.parse::<usize>()
{
Ok(size) => size,
Err(_) => {
SCLogError!("invalid buffer-size!");
panic!();
},
};
let config = ProducerConfig {
brokers: brokers.into(),
topic: topic.into(),
client_id: client_id.into(),
buffer: buffer_size,
};
Ok(config)
}
}
struct KafkaProducer {
producer: FutureProducer,
config: ProducerConfig,
rx: std::sync::mpsc::Receiver<String>,
count: usize,
}
impl KafkaProducer {
fn new(
config: ProducerConfig,
rx: std::sync::mpsc::Receiver<String>,
) -> Result<Self,KafkaError> {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", &config.brokers)
.set("client.id",&config.client_id)
.set("message.timeout.ms", "5000")
.create()?;
Ok(Self {
config,
producer,
rx,
count: 0,
})
}
fn run(&mut self) {
// Get a peekable iterator from the incoming channel. This allows us to
// get the next message from the channel without removing it, we can
// then remove it once its been sent to the server without error.
//
// Not sure how this will work with pipe-lining tho, will probably have
// to do some buffering here, or just accept that any log records
// in-flight will be lost.
let mut iter = self.rx.iter().peekable();
loop {
if let Some(buf) = iter.peek() {
self.count += 1;
if let Err(err) = self.producer.send_result(
FutureRecord::to(&self.config.topic)
.key("")
.payload(&buf),
) {
SCLogError!("Failed to send event to Kafka: {:?}", err);
break;
} else {
// Successfully sent. Pop it off the channel.
let _ = iter.next();
}
} else {
break;
}
}
SCLogNotice!("Producer finished: count={}", self.count,);
}
}
struct Context {
tx: std::sync::mpsc::SyncSender<String>,
count: usize,
dropped: usize,
}
unsafe extern "C" fn output_open(conf: *const c_void, init_data: *mut *mut c_void) -> c_int {
// Load configuration.
let config = ProducerConfig::new(&ConfNode::wrap(conf)).unwrap();
let (tx, rx) = std::sync::mpsc::sync_channel(config.buffer);
let mut kafka_producer = match KafkaProducer::new(config, rx) {
Ok(producer) => {
SCLogNotice!(
"KafKa Producer initialize success with brokers:{:?} | topic: {:?} | client_id: {:?} | buffer-size: {:?}",
producer.config.brokers,
producer.config.topic,
producer.config.client_id,
producer.config.buffer
);
producer
}
Err(err) => {
SCLogError!("Failed to initialize Kafka Producer: {:?}", err);
panic!()
}
};
let context = Context {
tx,
count: 0,
dropped: 0,
};
std::thread::spawn(move || {kafka_producer.run()});
// kafka_producer.run();
*init_data = Box::into_raw(Box::new(context)) as *mut _;
0
}
unsafe extern "C" fn output_close(init_data: *const c_void) {
let context = Box::from_raw(init_data as *mut Context);
SCLogNotice!(
"Kafka produce finished: count={}, dropped={}",
context.count,
context.dropped
);
std::mem::drop(context);
}
unsafe extern "C" fn output_write(
buffer: *const c_char,
buffer_len: c_int,
init_data: *const c_void,
) -> c_int {
let context = &mut *(init_data as *mut Context);
let buf = if let Ok(buf) = ffi::str_from_c_parts(buffer, buffer_len) {
buf
} else {
return -1;
};
context.count += 1;
if let Err(err) = context.tx.try_send(buf.to_string()) {
context.dropped += 1;
match err {
TrySendError::Full(_) => {
SCLogError!("Eve record lost due to full buffer");
}
TrySendError::Disconnected(_) => {
SCLogError!("Eve record lost due to broken channel{}",err);
}
}
}
0
}
unsafe extern "C" fn init_plugin() {
let file_type =
ffi::SCPluginFileType::new("kafka", output_open, output_close, output_write);
ffi::SCPluginRegisterFileType(file_type);
}
#[no_mangle]
extern "C" fn SCPluginRegister() -> *const ffi::SCPlugin {
// Rust plugins need to initialize some Suricata internals so stuff like logging works.
suricata::plugin::init();
// Register our plugin.
ffi::SCPlugin::new("Kafka Eve Filetype", "GPL-2.0", "1z3r0", init_plugin)
}
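Not a full answer, but a direction (hedged, untested): the number of partitions is a property of the Kafka topic itself, not of the producer's ClientConfig, so the topic needs to be created with 10 partitions - either ahead of time with the standard kafka-topics tool (--create ... --partitions 10) or programmatically through rdkafka's admin module. A rough sketch of the latter, assuming the admin API is available in your rdkafka build (ensure_topic is a hypothetical helper and needs an async runtime to drive it):
use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, TopicReplication};
use rdkafka::client::DefaultClientContext;
use rdkafka::ClientConfig;

// Hypothetical helper: create the topic with 10 partitions before producing to it.
// `brokers` and `topic` would come from the same ProducerConfig as above.
async fn ensure_topic(brokers: &str, topic: &str) -> Result<(), rdkafka::error::KafkaError> {
    let admin: AdminClient<DefaultClientContext> = ClientConfig::new()
        .set("bootstrap.servers", brokers)
        .create()?;
    let new_topic = NewTopic::new(topic, 10, TopicReplication::Fixed(1));
    admin.create_topics(&[new_topic], &AdminOptions::new()).await?;
    Ok(())
}
If running async code inside the Suricata plugin is awkward, creating the topic up front with the CLI is the simpler route.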

What's wrong with tokio unix socket server/client?

I have a server that broadcasts messages to connected clients, though the messages don't get delivered and my tests fail.
I'm using the following imports:
use anyhow::Result;
use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::broadcast::*;
use tokio::sync::Notify;
use tokio::task::JoinHandle;
This is how I start and set up my server:
pub struct Server {
#[allow(dead_code)]
tx: Sender<String>,
rx: Receiver<String>,
address: Arc<PathBuf>,
handle: Option<JoinHandle<Result<()>>>,
abort: Arc<Notify>,
}
impl Server {
pub fn new<P: AsRef<Path>>(address: P) -> Self {
let (tx, rx) = channel::<String>(400);
let address = Arc::new(address.as_ref().to_path_buf());
Self {
address,
handle: None,
tx,
rx,
abort: Arc::new(Notify::new()),
}
}
}
/// Start Server
pub async fn start(server: &mut Server) -> Result<()> {
tokio::fs::remove_file(server.address.as_path()).await.ok();
let listener = UnixListener::bind(server.address.as_path())?;
println!("[Server] Started");
let tx = server.tx.clone();
let abort = server.abort.clone();
server.handle = Some(tokio::spawn(async move {
loop {
let tx = tx.clone();
let abort1 = abort.clone();
tokio::select! {
_ = abort.notified() => break,
Ok((client, _)) = listener.accept() => {
tokio::spawn(async move { handle(client, tx, abort1).await });
}
}
}
println!("[Server] Aborted!");
Ok(())
}));
Ok(())
}
my handle function
/// Handle stream
async fn handle(mut stream: UnixStream, tx: Sender<String>, abort: Arc<Notify>) {
loop {
let mut rx = tx.subscribe();
let abort = abort.clone();
tokio::select! {
_ = abort.notified() => break,
result = rx.recv() => match result {
Ok(output) => {
stream.write_all(output.as_bytes()).await.unwrap();
stream.write(b"\n").await.unwrap();
continue;
}
Err(e) => {
println!("[Server] {e}");
break;
}
}
}
}
stream.write(b"").await.unwrap();
stream.flush().await.unwrap();
}
my connect function
/// Connect to server
async fn connect(address: Arc<PathBuf>, name: String) -> Vec<String> {
use tokio::io::{AsyncBufReadExt, BufReader};
let mut outputs = vec![];
let stream = UnixStream::connect(&*address).await.unwrap();
let mut breader = BufReader::new(stream);
let mut buf = vec![];
loop {
if let Ok(len) = breader.read_until(b'\n', &mut buf).await {
if len == 0 {
break;
} else {
let value = String::from_utf8(buf.clone()).unwrap();
print!("[{name}] {value}");
outputs.push(value)
};
buf.clear();
}
}
println!("[{name}] ENDED");
outputs
}
This is what I feed to the channel and want to have broadcast to all clients:
/// Feed data
pub fn feed(tx: Sender<String>, abort: Arc<Notify>) -> Result<JoinHandle<Result<()>>> {
use tokio::io::*;
use tokio::process::Command;
Ok(tokio::spawn(async move {
let mut child = Command::new("echo")
.args(&["1\n", "2\n", "3\n", "4\n"])
.stdout(Stdio::piped())
.stderr(Stdio::null())
.stdin(Stdio::null())
.spawn()?;
let mut stdout = BufReader::new(child.stdout.take().unwrap()).lines();
loop {
let sender = tx.clone();
tokio::select! {
result = stdout.next_line() => match result {
Err(e) => {
println!("[Server] FAILED to send an output to channel: {e}");
},
Ok(None) => break,
Ok(Some(output)) => {
let output = output.trim().to_string();
println!("[Server] {output}");
if !output.is_empty() {
if let Err(e) = sender.send(output) {
println!("[Server] FAILED to send an output to channel: {e}");
}
}
}
}
}
}
println!("[Server] Process Completed");
abort.notify_waiters();
Ok(())
}))
}
my failing test
#[tokio::test]
async fn test_server() -> Result<()> {
let mut server = Server::new("/tmp/testsock.socket");
start(&mut server).await?;
feed(server.tx.clone(), server.abort.clone()).unwrap();
let address = server.address.clone();
let client1 = connect(address.clone(), "Alpha".into());
let client2 = connect(address.clone(), "Beta".into());
let client3 = connect(address.clone(), "Delta".into());
let client4 = connect(address.clone(), "Gamma".into());
let (c1, c2, c3, c4) = tokio::join!(client1, client2, client3, client4,);
server.handle.unwrap().abort();
assert_eq!(c1.len(), 4, "Alpha");
assert_eq!(c2.len(), 4, "Beta");
assert_eq!(c3.len(), 4, "Delta");
assert_eq!(c4.len(), 4, "Gamma");
println!("ENDED");
Ok(())
}
Logs:
[Server] Started
[Server] 1
[Server] 2
[Server] 3
[Server] 4
[Server]
[Delta] 1
[Gamma] 1
[Alpha] 1
[Beta] 1
[Server] Process Completed
[Server] Aborted!
[Gamma] ENDED
[Alpha] ENDED
[Beta] ENDED
[Delta] ENDED
Well, not an answer, but I just want to suggest using task::spawn to generate a JoinHandle from a function. Then, say, your handle could be:
fn handle(mut stream: UnixStream, tx: Sender<String>, abort: Arc<Notify>) -> JoinHandle<()> {
let mut rx = tx.subscribe();
let abort = abort.clone();
task::spawn( async move {
loop {
tokio::select! {
_ = abort.notified() => break,
result = rx.recv() => match result {
Ok(output) => {
stream.write_all(output.as_bytes()).await.unwrap();
stream.write(b"\n").await.unwrap();
continue;
}
Err(e) => {
println!("[Server] {e}");
break;
}
}
}
}
stream.write(b"").await.unwrap();
stream.flush().await.unwrap();
})
}
I mean, I haven't tested this, but I see a sort of duplication in the code above: two loops, two select!s, and the abort check done twice.
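And a rough sketch (again untested) of what the accept loop in start() might then look like with that version of handle(), keeping the returned handles around instead of spawning inline:
let mut client_tasks: Vec<JoinHandle<()>> = Vec::new();
loop {
    let tx = tx.clone();
    let abort1 = abort.clone();
    tokio::select! {
        _ = abort.notified() => break,
        Ok((client, _)) = listener.accept() => {
            // handle() now does the spawning itself and hands back the JoinHandle.
            client_tasks.push(handle(client, tx, abort1));
        }
    }
}
// Optionally wait for the in-flight clients before returning.
for task in client_tasks {
    let _ = task.await;
}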

What lifetimes and bounds are needed to generalize this async code? [duplicate]

This question already has answers here:
How to fix lifetime error when function returns a serde Deserialize type?
(2 answers)
Why do Rust lifetimes matter when I move values into a spawned Tokio task?
(1 answer)
Closed 1 year ago.
I have this websocket code that uses tokio and serde here:
use async_once::AsyncOnce;
use common_wasm::models::status::{CommandMessage, StatusMessage};
use futures_util::{SinkExt, StreamExt};
use lazy_static::lazy_static;
use std::{collections::VecDeque, net::SocketAddr};
use tokio::{
net::{TcpListener, TcpStream}, sync::{broadcast, mpsc}
};
use tokio_tungstenite::{
accept_async, tungstenite::{Error, Message, Result}
};
use tracing::*;
// https://stackoverflow.com/questions/67650879/rust-lazy-static-with-async-await
lazy_static! {
pub static ref STATUS_REPORTER: AsyncOnce<StatusWs> = AsyncOnce::new(async {
info!("Init lazy static WS");
let server = StatusWs::init("ws://localhost:44444").await;
server
});
}
use StatusMessage as SenderType;
use CommandMessage as ReceiveType;
pub struct StatusWs {
buf: VecDeque<ReceiveType>,
rx_client_msg: mpsc::Receiver<ReceiveType>,
tx_server_msg: broadcast::Sender<SenderType>,
}
impl StatusWs {
pub async fn init(addr: &str) -> StatusWs {
info!("Init Status WS on {}", addr);
let listener = TcpListener::bind(&addr).await.expect("Can't listen");
// Clients producting to server, they use the tx to send and server uses the rx to read
let (tx_client_msg, rx_client_msg) = mpsc::channel::<ReceiveType>(32);
// spmc for server to broadcast status to listeners. Server uses tx to send and client uses rx to read
let (tx_server_msg, _rx_server_msg) = broadcast::channel::<SenderType>(10);
let tx_server_2 = tx_server_msg.clone();
tokio::spawn(async move {
while let Ok((stream, peer)) = listener.accept().await {
info!("Peer address connected: {}", peer);
let tx_client = tx_client_msg.clone();
let rx_server = tx_server_msg.subscribe();
tokio::spawn(async move {
accept_connection(peer, stream, tx_client, rx_server).await;
});
}
});
StatusWs { buf: VecDeque::new(), rx_client_msg, tx_server_msg: tx_server_2 }
}
pub async fn reportinfo(&self, msg: &SenderType) {
let my_msg = msg.clone();
match &self.tx_server_msg.send(my_msg) {
Ok(_size) => {
//trace!("Server Sending OK {}", size)
},
Err(_err) => {
//trace!("Server Sending ERR {:?}", err)
},
}
}
pub async fn next(&mut self) -> Result<Option<ReceiveType>> {
loop {
// If buffer contains data, we can directly return it.
if let Some(data) = self.buf.pop_front() {
return Ok(Some(data));
}
// Fetch new response if buffer is empty.
let response = self.next_response().await?;
// Handle the response, possibly adding to the buffer
self.handle_response(response)?;
}
}
async fn next_response(&mut self) -> Result<ReceiveType> {
loop {
tokio::select! { // TODO don't need select if there's only one thing?
Some(msg) = self.rx_client_msg.recv() => {
return Ok(msg)
},
}
}
}
fn handle_response(&mut self, response: ReceiveType) -> Result<()> {
self.buf.push_back(response);
Ok(())
}
}
async fn accept_connection(peer: SocketAddr, stream: TcpStream, tx_client: mpsc::Sender<ReceiveType>, rx_server: broadcast::Receiver<SenderType>) {
info!("Accepting connection from {}", peer);
if let Err(e) = handle_connection(peer, stream, tx_client, rx_server).await {
match e {
Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => error!("Connection closed"),
err => error!("Error processing connection: {}", err),
}
}
}
async fn handle_connection(
_peer: SocketAddr, stream: TcpStream, tx_client: mpsc::Sender<ReceiveType>, mut rx_server: broadcast::Receiver<SenderType>,
) -> Result<()> {
let ws_stream = accept_async(stream).await.expect("Failed to accept");
let (mut ws_sender, mut ws_receiver) = ws_stream.split();
loop {
tokio::select! {
remote_msg = ws_receiver.next() => {
match remote_msg {
Some(msg) => {
let msg = msg?;
match msg {
Message::Text(resptxt) => {
match serde_json::from_str::<ReceiveType>(&resptxt) {
Ok(cmd) => { let _ = tx_client.send(cmd).await; },
Err(err) => error!("Error deserializing: {}", err),
}
},
Message::Close(_) => break,
_ => { },
}
}
None => break,
}
}
Ok(msg) = rx_server.recv() => {
match serde_json::to_string(&msg) {
Ok(txt) => ws_sender.send(Message::Text(txt)).await?,
Err(_) => todo!(),
}
}
}
}
Ok(())
}
The sender and receiver types are simple (simple types all the way down):
use std::{collections::BTreeMap, fmt::Debug};
use serde::{Deserialize, Serialize};
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct StatusMessage {
pub name: String,
pub entries: BTreeMap<i32, GuiEntry>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CommandMessage {
pub sender: String,
pub entryid: i32,
pub command: GuiValue,
}
Now I want to generalize the code so that I can create a struct that takes some other kind of Sender and Receiver type. Yes, I could just change the aliases, but I want to be able to use generic type arguments rather than duplicate the whole file. The problem is that as I follow the suggestions from the compiler, I end up in a place where I don't know what to do next. It's telling me resptxt does not live long enough:
`resptxt` does not live long enough
borrowed value does not live long enough rustc(E0597)
status_ws.rs(133, 29): `resptxt` dropped here while still borrowed
status_ws.rs(115, 28): lifetime `'a` defined here
status_ws.rs(129, 39): argument requires that `resptxt` is borrowed for `'a`
Here's what I have thus far:
use async_once::AsyncOnce;
use common_wasm::models::status::{CommandMessage, StatusMessage};
use futures_util::{SinkExt, StreamExt};
use lazy_static::lazy_static;
use serde::{Serialize, Deserialize};
use std::{collections::VecDeque, net::SocketAddr};
use tokio::{
net::{TcpListener, TcpStream}, sync::{broadcast, mpsc}
};
use tokio_tungstenite::{
accept_async, tungstenite::{Error, Message, Result}
};
use tracing::*;
// https://stackoverflow.com/questions/67650879/rust-lazy-static-with-async-await
lazy_static! {
pub static ref STATUS_REPORTER: AsyncOnce<StatusWs<CommandMessage, StatusMessage>> = AsyncOnce::new(async {
info!("Init lazy static WS");
let server = StatusWs::init("ws://localhost:44444").await;
server
});
}
// use StatusMessage as SenderType;
// use CommandMessage as ReceiveType;
pub struct StatusWs<ReceiveType, SenderType> {
buf: VecDeque<ReceiveType>,
rx_client_msg: mpsc::Receiver<ReceiveType>,
tx_server_msg: broadcast::Sender<SenderType>,
}
impl <'a, ReceiveType: Deserialize<'a> + Send, SenderType: Serialize + Clone + Send + Sync> StatusWs <ReceiveType, SenderType> {
pub async fn init(addr: &str) -> StatusWs<ReceiveType, SenderType> {
info!("Init Status WS on {}", addr);
let listener = TcpListener::bind(&addr).await.expect("Can't listen");
// Clients producting to server, they use the tx to send and server uses the rx to read
let (tx_client_msg, rx_client_msg) = mpsc::channel::<ReceiveType>(32);
// spmc for server to broadcast status to listeners. Server uses tx to send and client uses rx to read
let (tx_server_msg, _rx_server_msg) = broadcast::channel::<SenderType>(10);
let tx_server_2 = tx_server_msg.clone();
tokio::spawn(async move {
while let Ok((stream, peer)) = listener.accept().await {
info!("Peer address connected: {}", peer);
let tx_client = tx_client_msg.clone();
let rx_server = tx_server_msg.subscribe();
tokio::spawn(async move {
accept_connection(peer, stream, tx_client, rx_server).await;
});
}
});
StatusWs { buf: VecDeque::new(), rx_client_msg, tx_server_msg: tx_server_2 }
}
pub async fn reportinfo(&self, msg: &SenderType) {
let my_msg = msg.clone();
match &self.tx_server_msg.send(my_msg) {
Ok(_size) => {
//trace!("Server Sending OK {}", size)
},
Err(_err) => {
//trace!("Server Sending ERR {:?}", err)
},
}
}
pub async fn next(&mut self) -> Result<Option<ReceiveType>> {
loop {
// If buffer contains data, we can directly return it.
if let Some(data) = self.buf.pop_front() {
return Ok(Some(data));
}
// Fetch new response if buffer is empty.
let response = self.next_response().await?;
// Handle the response, possibly adding to the buffer
self.handle_response(response)?;
}
}
async fn next_response(&mut self) -> Result<ReceiveType> {
loop {
tokio::select! { // TODO don't need select if there's only one thing?
Some(msg) = self.rx_client_msg.recv() => {
return Ok(msg)
},
}
}
}
fn handle_response(&mut self, response: ReceiveType) -> Result<()> {
self.buf.push_back(response);
Ok(())
}
}
async fn accept_connection<'a, ReceiveType: Deserialize<'a>, SenderType: Clone + Serialize>(peer: SocketAddr, stream: TcpStream, tx_client: mpsc::Sender<ReceiveType>, rx_server: broadcast::Receiver<SenderType>) {
info!("Accepting connection from {}", peer);
if let Err(e) = handle_connection(peer, stream, tx_client, rx_server).await {
match e {
Error::ConnectionClosed | Error::Protocol(_) | Error::Utf8 => error!("Connection closed"),
err => error!("Error processing connection: {}", err),
}
}
}
async fn handle_connection<'a, ReceiveType: Deserialize<'a>, SenderType: Clone + Serialize>(
_peer: SocketAddr, stream: TcpStream, tx_client: mpsc::Sender<ReceiveType>, mut rx_server: broadcast::Receiver<SenderType>,
) -> Result<()> {
let ws_stream = accept_async(stream).await.expect("Failed to accept");
let (mut ws_sender, mut ws_receiver) = ws_stream.split();
loop {
tokio::select! {
remote_msg = ws_receiver.next() => {
match remote_msg {
Some(msg) => {
let msg = msg?;
match msg {
Message::Text(resptxt) => {
match serde_json::from_str::<ReceiveType>(&resptxt) {
Ok(cmd) => { let _ = tx_client.send(cmd).await; },
Err(err) => error!("Error deserializing: {}", err),
}
},
Message::Close(_) => break,
_ => { },
}
}
None => break,
}
}
Ok(msg) = rx_server.recv() => {
match serde_json::to_string(&msg) {
Ok(txt) => ws_sender.send(Message::Text(txt)).await?,
Err(_) => todo!(),
}
}
}
}
Ok(())
}
I think there's some confusion about the necessary lifetimes and bounds, in particular the lifetime on the Deserializer from Serde and the Send/Sync auto trait markers on the message types.
In any case, it seems a bit brute force to just copy the whole original file and change out the aliases, which would definitely work, when it seems there's some sort of useful lesson here.
You should use serde::de::DeserializeOwned instead of Deserialize<'a>.
The Deserialize trait takes a lifetime parameter to support zero-cost deserialization, but you can't take advantage of that since the source, resptxt, is a transient value that isn't persisted anywhere. The DeserializeOwned trait can be used to constrain that the deserialized type does not keep references to the source and can therefore be used beyond it.
After fixing that, you'll get errors that ReceiveType and SenderType must be 'static to be used in a tokio::spawn'd task. Adding that constraint finally makes your code compile.
See the full compiling code on the playground for brevity.
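For illustration only, a minimal sketch of what the DeserializeOwned bound buys you (parse_command is a hypothetical helper, not part of the original code): the parsed value owns its data, so it can outlive the borrowed resptxt and be sent into a channel or a spawned task.
use serde::de::DeserializeOwned;

// Hypothetical helper mirroring the Message::Text(resptxt) arm: T owns all of
// its data after parsing, so returning it is fine even though resptxt is a
// short-lived borrow.
fn parse_command<T: DeserializeOwned>(resptxt: &str) -> Option<T> {
    serde_json::from_str::<T>(resptxt).ok()
}
On the impl and the free functions, the bounds then become along the lines of ReceiveType: DeserializeOwned + Send + 'static and SenderType: Serialize + Clone + Send + Sync + 'static, matching the 'static requirement mentioned above.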

color_quant::NeuQuant compiled to WebAssembly outputs zero values

I am trying to load an image in the browser and use the NeuQuant algorithm to quantize my image buffer in Rust via WebAssembly. However, the NeuQuant output contains zero values, regardless of what PNG I try to feed it.
I expose two Rust methods to WASM:
alloc for allocating a byte buffer
read_img which will read and process the img buffer
I know that I get zero values because I imported a JavaScript method called log_nr for logging simple u8 numbers. The buffer seems to contain valid pixel values.
extern crate color_quant;
extern crate image;
use color_quant::NeuQuant;
use image::{DynamicImage, GenericImage, Pixel, Rgb};
use std::collections::BTreeMap;
use std::mem;
use std::os::raw::c_void;
static NQ_SAMPLE_FACTION: i32 = 10;
static NQ_PALETTE_SIZE: usize = 256;
extern "C" {
fn log(s: &str, len: usize);
fn log_nr(nr: u8);
}
fn get_pixels(img: DynamicImage) -> Vec<u8> {
let mut pixels = Vec::new();
for (_, _, px) in img.pixels() {
let rgba = px.to_rgba();
for channel in px.channels() {
pixels.push(*channel);
}
}
pixels
}
#[no_mangle]
pub extern "C" fn alloc(size: usize) -> *mut c_void {
let mut buf = Vec::with_capacity(size);
let ptr = buf.as_mut_ptr();
mem::forget(buf);
return ptr as *mut c_void;
}
fn process_img(img: DynamicImage) {
let pixels: Vec<u8> = get_pixels(img);
let quantized = NeuQuant::new(NQ_SAMPLE_FACTION, NQ_PALETTE_SIZE, &pixels);
let q = quantized.color_map_rgb();
for c in &q {
unsafe {
log_nr(*c);
}
}
}
#[no_mangle]
pub extern "C" fn read_img(buff_ptr: *mut u8, buff_len: usize) {
let mut img: Vec<u8> = unsafe { Vec::from_raw_parts(buff_ptr, buff_len, buff_len) };
return match image::load_from_memory(&img) {
Ok(img) => {
process_img(img);
}
Err(err) => {
let err_msg: String = err.to_string().to_owned();
let mut ns: String = "[load_from_memory] ".to_owned();
ns.push_str(&err_msg);
unsafe {
log(&ns, ns.len());
}
}
};
}
fn main() {
println!("Hello from rust 2");
}
The JavaScript code is the following:
run('sample.png');
function run(img) {
return compile().then(m => {
return loadImgIntoMem(img, m.instance.exports.memory, m.instance.exports.alloc).then(r => {
return m.instance.exports.read_img(r.imgPtr, r.len);
});
})
}
function compile(wasmFile = 'distil_wasm.gc.wasm') {
return fetch(wasmFile)
.then(r => r.arrayBuffer())
.then(r => {
let module = new WebAssembly.Module(r);
let importObject = {}
for (let imp of WebAssembly.Module.imports(module)) {
if (typeof importObject[imp.module] === "undefined")
importObject[imp.module] = {};
switch (imp.kind) {
case "function": importObject[imp.module][imp.name] = () => {}; break;
case "table": importObject[imp.module][imp.name] = new WebAssembly.Table({ initial: 256, maximum: 256, element: "anyfunc" }); break;
case "memory": importObject[imp.module][imp.name] = new WebAssembly.Memory({ initial: 256 }); break;
case "global": importObject[imp.module][imp.name] = 0; break;
}
}
importObject.env = Object.assign({}, importObject.env, {
log: (ptr, len) => console.log(ptrToStr(ptr, len)),
log_nr: (nr) => console.log(nr),
});
return WebAssembly.instantiate(r, importObject);
});
}
function loadImgIntoMemEmscripten(img) {
return new Promise(resolve => {
fetch(img)
.then(r => r.arrayBuffer())
.then(buff => {
const imgPtr = Module._malloc(buff.byteLength);
const imgHeap = new Uint8Array(Module.HEAPU8.buffer, imgPtr, buff.byteLength);
imgHeap.set(new Uint8Array(buff));
resolve({ imgPtr });
});
});
}

Polymorphism in Rust and trait references (trait objects?)

I'm writing a process memory scanner with a console prompt interface in Rust.
I need scanner types such as a winapi scanner or a ring0 driver scanner, so I'm trying to implement polymorphism.
I have the following construction at this moment:
pub trait Scanner {
fn attach(&mut self, pid: u32) -> bool;
fn detach(&mut self);
}
pub struct WinapiScanner {
pid: u32,
hprocess: HANDLE,
addresses: Vec<usize>
}
impl WinapiScanner {
pub fn new() -> WinapiScanner {
WinapiScanner {
pid: 0,
hprocess: 0 as HANDLE,
addresses: Vec::<usize>::new()
}
}
}
impl Scanner for WinapiScanner {
fn attach(&mut self, pid: u32) -> bool {
let handle = unsafe { OpenProcess(PROCESS_ALL_ACCESS, FALSE, pid) };
if handle == 0 as HANDLE {
self.pid = pid;
self.hprocess = handle;
true
} else {
false
}
}
fn detach(&mut self) {
unsafe { CloseHandle(self.hprocess) };
self.pid = 0;
self.hprocess = 0 as HANDLE;
self.addresses.clear();
}
}
In the future, I'll have some more scanner types besides WinapiScanner, so, if I understand correctly, I should use a trait reference (&Scanner) to implement polymorphism. I'm trying to create a Scanner object like this (note the comments):
enum ScannerType {
Winapi
}
pub fn start() {
let mut scanner: Option<&mut Scanner> = None;
let mut scanner_type = ScannerType::Winapi;
loop {
let line = prompt();
let tokens: Vec<&str> = line.split_whitespace().collect();
match tokens[0] {
// commands
"scanner" => {
if tokens.len() != 2 {
println!("\"scanner\" command takes 1 argument")
} else {
match tokens[1] {
"list" => {
println!("Available scanners: winapi");
},
"winapi" => {
scanner_type = ScannerType::Winapi;
println!("Scanner type set to: winapi");
},
x => {
println!("Unknown scanner type: {}", x);
}
}
}
},
"attach" => {
if tokens.len() > 1 {
match tokens[1].parse::<u32>() {
Ok(pid) => {
scanner = match scanner_type {
// ----------------------
// Problem goes here.
// Object, created by WinapiScanner::new() constructor
// doesn't live long enough to borrow it here
ScannerType::Winapi => Some(&mut WinapiScanner::new())
// ----------------------
}
}
Err(_) => {
println!("Wrong pid");
}
}
}
},
x => println!("Unknown command: {}", x)
}
}
}
fn prompt() -> String {
use std::io::Write;
use std::io::BufRead;
let stdout = io::stdout();
let mut lock = stdout.lock();
let _ = lock.write(">> ".as_bytes());
let _ = lock.flush();
let stdin = io::stdin();
let mut lock = stdin.lock();
let mut buf = String::new();
let _ = lock.read_line(&mut buf);
String::from(buf.trim())
}
It's not a full program; I've pasted important parts only.
What am I doing wrong and how do I implement what I want in Rust?
Trait objects must be used behind a pointer. But references are not the only kind of pointers; Box is also a pointer!
let mut scanner: Option<Box<Scanner>> = None;
scanner = match scanner_type {
ScannerType::Winapi => Some(Box::new(WinapiScanner::new()))
};
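A small follow-on sketch (hypothetical, matching the Scanner trait above) of using the boxed trait object afterwards - calls go through the Box with dynamic dispatch:
if let Some(ref mut s) = scanner {
    // attach() is dispatched dynamically to WinapiScanner::attach here.
    if s.attach(pid) {
        println!("Attached to pid {}", pid);
    }
}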
