Equivalent of Python's subprocess.communicate in Rust?

I'm trying to port this Python script, which sends input to a helper process and receives its output, to Rust:
import subprocess
data = chr(0x3f) * 1024 * 4096
child = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output, _ = child.communicate(data)
assert output == data
My attempt worked fine until the input exceeded 64 KiB, presumably because the OS's pipe buffer filled up before all of the input could be written.
use std::io::Write;

const DATA: [u8; 1024 * 4096] = [0x3f; 1024 * 4096];

fn main() {
    let mut child = std::process::Command::new("cat")
        .stdout(std::process::Stdio::piped())
        .stdin(std::process::Stdio::piped())
        .spawn()
        .unwrap();
    match child.stdin {
        Some(ref mut stdin) => {
            match stdin.write_all(&DATA[..]) {
                Ok(()) => {}
                Err(err) => panic!("{}", err),
            }
        }
        None => unreachable!(),
    }
    let res = child.wait_with_output();
    assert_eq!(res.unwrap().stdout.len(), DATA.len())
}
Is there a subprocess.communicate equivalent in Rust? Maybe a select equivalent? Can mio be used to solve this problem? Also, there seems to be no way to close stdin.
The goal here is to make a high performance system, so I want to avoid spawning a thread per task.

Well, it took a fair amount of code to get this done, and I needed a combination of mio and nix, because mio wouldn't set AsRawFd items to nonblocking when they were pipes, so that had to be done manually first.
Here's the result:
extern crate mio;
extern crate bytes;
extern crate nix;

use mio::*;
use std::io;
use mio::unix::{PipeReader, PipeWriter};
use std::process::{Command, Stdio};
use std::os::unix::io::AsRawFd;
use nix::fcntl::FcntlArg::F_SETFL;
use nix::fcntl::{fcntl, O_NONBLOCK};

struct SubprocessClient {
    stdin: PipeWriter,
    stdout: PipeReader,
    output: Vec<u8>,
    input: Vec<u8>,
    input_offset: usize,
    buf: [u8; 65536],
}

// Sends a message and expects to receive the same exact message, one at a time
impl SubprocessClient {
    fn new(stdin: PipeWriter, stdout: PipeReader, data: &[u8]) -> SubprocessClient {
        SubprocessClient {
            stdin: stdin,
            stdout: stdout,
            output: Vec::<u8>::new(),
            buf: [0; 65536],
            input: data.to_vec(),
            input_offset: 0,
        }
    }

    fn readable(&mut self, _event_loop: &mut EventLoop<SubprocessClient>) -> io::Result<()> {
        println!("client socket readable");
        match self.stdout.try_read(&mut self.buf[..]) {
            Ok(None) => {
                println!("CLIENT : spurious read wakeup");
            }
            Ok(Some(r)) => {
                println!("CLIENT : We read {} bytes!", r);
                self.output.extend(&self.buf[0..r]);
            }
            Err(e) => {
                return Err(e);
            }
        };
        Ok(())
    }

    fn writable(&mut self, event_loop: &mut EventLoop<SubprocessClient>) -> io::Result<()> {
        println!("client socket writable");
        match self.stdin.try_write(&(&self.input)[self.input_offset..]) {
            Ok(None) => {
                println!("client flushing buf; WOULDBLOCK");
            }
            Ok(Some(r)) => {
                println!("CLIENT : we wrote {} bytes!", r);
                self.input_offset += r;
            }
            Err(e) => println!("not implemented; client err={:?}", e),
        }
        if self.input_offset == self.input.len() {
            event_loop.shutdown();
        }
        Ok(())
    }
}

impl Handler for SubprocessClient {
    type Timeout = usize;
    type Message = ();

    fn ready(&mut self, event_loop: &mut EventLoop<SubprocessClient>, token: Token,
             events: EventSet) {
        println!("ready {:?} {:?}", token, events);
        if events.is_readable() {
            let _x = self.readable(event_loop);
        }
        if events.is_writable() {
            let _y = self.writable(event_loop);
        }
    }
}

pub fn from_nix_error(err: ::nix::Error) -> io::Error {
    io::Error::from_raw_os_error(err.errno() as i32)
}

fn set_nonblock(s: &AsRawFd) -> io::Result<()> {
    fcntl(s.as_raw_fd(), F_SETFL(O_NONBLOCK))
        .map_err(from_nix_error)
        .map(|_| ())
}

const TEST_DATA: [u8; 1024 * 4096] = [40; 1024 * 4096];

pub fn echo_server() {
    let mut event_loop = EventLoop::<SubprocessClient>::new().unwrap();
    let process = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()
        .unwrap();
    let raw_stdin_fd;
    match process.stdin {
        None => unreachable!(),
        Some(ref item) => {
            // The pipe must be switched to nonblocking before mio can poll it.
            match set_nonblock(item) {
                Ok(()) => {}
                Err(e) => panic!("{:?}", e),
            }
            raw_stdin_fd = item.as_raw_fd();
        }
    }
    let raw_stdout_fd;
    match process.stdout {
        None => unreachable!(),
        Some(ref item) => {
            match set_nonblock(item) {
                Ok(()) => {}
                Err(e) => panic!("{:?}", e),
            }
            raw_stdout_fd = item.as_raw_fd();
        }
    }
    let mut subprocess = SubprocessClient::new(
        PipeWriter::from(Io::from_raw_fd(raw_stdin_fd)),
        PipeReader::from(Io::from_raw_fd(raw_stdout_fd)),
        &TEST_DATA[..],
    );
    let stdout_token: Token = Token(0);
    let stdin_token: Token = Token(1);
    event_loop.register(&subprocess.stdout, stdout_token, EventSet::readable(),
                        PollOpt::level()).unwrap();
    event_loop.register(&subprocess.stdin, stdin_token, EventSet::writable(),
                        PollOpt::level()).unwrap();
    // Start the event loop
    event_loop.run(&mut subprocess).unwrap();
    let res = process.wait_with_output();
    match res {
        Err(e) => panic!("{:?}", e),
        Ok(output) => {
            subprocess.output.extend(&output.stdout);
            println!("Final output was {}\n", output.stdout.len());
        }
    }
    println!("{:?}\n", subprocess.output.len());
}

fn main() {
    echo_server();
}
Basically the only way to close stdin was to call process.wait_with_output, since ChildStdin has no explicit close method; the pipe is closed when the handle is dropped, which wait_with_output does internally. Once that happened, the remaining data could be read and appended to the output vector.
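For what it's worth, closing stdin explicitly is possible with just std: ChildStdin closes its file descriptor when dropped, so you can take the handle out of the Child and drop it. A minimal sketch of that pattern (small input only; the large-input deadlock this question is about still applies):

use std::io::Write;
use std::process::{Command, Stdio};

fn main() {
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()
        .unwrap();
    // Take ownership of the pipe out of the Child...
    let mut stdin = child.stdin.take().unwrap();
    stdin.write_all(b"hello").unwrap();
    // ...and drop it, closing the write end so `cat` sees EOF.
    drop(stdin);
    let output = child.wait_with_output().unwrap();
    assert_eq!(output.stdout, b"hello");
}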
There's now a crate that does this
https://crates.io/crates/subprocess-communicate

In this particular example, you know that the input and output amounts are equivalent, so you don't need threads at all. You can just write a bit and then read a bit:
use std::io::{self, Cursor, Read, Write};

static DATA: [u8; 1024 * 4096] = [0x3f; 1024 * 4096];
const TRANSFER_LIMIT: u64 = 32 * 1024;

fn main() {
    let mut child = std::process::Command::new("cat")
        .stdout(std::process::Stdio::piped())
        .stdin(std::process::Stdio::piped())
        .spawn()
        .expect("Could not start child");

    let mut input = Cursor::new(&DATA[..]);
    let mut output = Cursor::new(Vec::new());

    match (child.stdin.as_mut(), child.stdout.as_mut()) {
        (Some(stdin), Some(stdout)) => {
            while input.position() < input.get_ref().len() as u64 {
                io::copy(&mut input.by_ref().take(TRANSFER_LIMIT), stdin)
                    .expect("Could not copy input");
                io::copy(&mut stdout.take(TRANSFER_LIMIT), &mut output)
                    .expect("Could not copy output");
            }
        }
        _ => panic!("child process input and output were not opened"),
    }

    child.wait().expect("Could not join child");

    let res = output.into_inner();
    assert_eq!(res.len(), DATA.len());
    assert_eq!(&*res, &DATA[..]);
}
If you didn't have that specific restriction, you would need to use select from the libc crate, which requires file descriptors for the pipes, so it will probably restrict your code to running on Linux / OS X.
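For reference, a rough sketch of the select-based building block, using the libc crate directly (Unix-only, untested here, error handling omitted); the surrounding loop would then read/write whichever pipe is ready, much like the mio example above:

use std::os::unix::io::RawFd;

// Block until `read_fd` is readable and/or `write_fd` is writable;
// returns (readable, writable).
unsafe fn select_pipes(read_fd: RawFd, write_fd: RawFd) -> (bool, bool) {
    let mut read_set: libc::fd_set = std::mem::zeroed();
    let mut write_set: libc::fd_set = std::mem::zeroed();
    libc::FD_ZERO(&mut read_set);
    libc::FD_ZERO(&mut write_set);
    libc::FD_SET(read_fd, &mut read_set);
    libc::FD_SET(write_fd, &mut write_set);
    let nfds = read_fd.max(write_fd) + 1;
    // Null timeout: block until at least one fd is ready.
    libc::select(
        nfds,
        &mut read_set,
        &mut write_set,
        std::ptr::null_mut(),
        std::ptr::null_mut(),
    );
    (
        libc::FD_ISSET(read_fd, &mut read_set),
        libc::FD_ISSET(write_fd, &mut write_set),
    )
}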
You could also start threads, one for each pipe (and reuse the parent thread for one of the pipes), but you've already ruled that out.
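For completeness, the thread version is short. A sketch using only std, with one writer thread while the parent thread reads, so neither pipe buffer can fill up and deadlock:

use std::io::{Read, Write};
use std::process::{Command, Stdio};
use std::thread;

fn main() {
    let data = vec![0x3f_u8; 1024 * 4096];
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()
        .unwrap();
    let mut stdin = child.stdin.take().unwrap();
    let mut stdout = child.stdout.take().unwrap();
    let input = data.clone();
    // The writer thread feeds stdin while the parent thread drains stdout.
    let writer = thread::spawn(move || {
        stdin.write_all(&input).unwrap();
        // stdin is dropped here, closing the pipe so `cat` exits.
    });
    let mut output = Vec::new();
    stdout.read_to_end(&mut output).unwrap();
    writer.join().unwrap();
    child.wait().unwrap();
    assert_eq!(output, data);
}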

Related

How to add partitions in Kafka Rust configuration

I want to configure this file to add a number-of-partitions option; by default it creates only 1 partition, but I need 10 for my data.
I don't have much knowledge of the rdkafka library in Rust, as I am using this plugin file directly.
Can anyone guide me to where I can find a solution, or at least point me in the right direction?
Thanks
use rdkafka::error::KafkaError;
use rdkafka::ClientConfig;
use rdkafka::producer::{FutureProducer, FutureRecord};
use std::fmt::Error;
use std::os::raw::{c_char, c_int, c_void};
use std::sync::mpsc::TrySendError;
use suricata::conf::ConfNode;
use suricata::{SCLogError, SCLogNotice};

const DEFAULT_BUFFER_SIZE: &str = "65535";
const DEFAULT_CLIENT_ID: &str = "rdkafka";

#[derive(Debug, Clone)]
struct ProducerConfig {
    brokers: String,
    topic: String,
    client_id: String,
    buffer: usize,
}

impl ProducerConfig {
    fn new(conf: &ConfNode) -> Result<Self, Error> {
        let brokers = if let Some(val) = conf.get_child_value("brokers") {
            val.to_string()
        } else {
            SCLogError!("brokers parameter required!");
            panic!();
        };
        let topic = if let Some(val) = conf.get_child_value("topic") {
            val.to_string()
        } else {
            SCLogError!("topic parameter required!");
            panic!();
        };
        let client_id = conf.get_child_value("client-id").unwrap_or(DEFAULT_CLIENT_ID);
        let buffer_size = match conf
            .get_child_value("buffer-size")
            .unwrap_or(DEFAULT_BUFFER_SIZE)
            .parse::<usize>()
        {
            Ok(size) => size,
            Err(_) => {
                SCLogError!("invalid buffer-size!");
                panic!();
            }
        };
        let config = ProducerConfig {
            brokers: brokers.into(),
            topic: topic.into(),
            client_id: client_id.into(),
            buffer: buffer_size,
        };
        Ok(config)
    }
}

struct KafkaProducer {
    producer: FutureProducer,
    config: ProducerConfig,
    rx: std::sync::mpsc::Receiver<String>,
    count: usize,
}

impl KafkaProducer {
    fn new(
        config: ProducerConfig,
        rx: std::sync::mpsc::Receiver<String>,
    ) -> Result<Self, KafkaError> {
        let producer: FutureProducer = ClientConfig::new()
            .set("bootstrap.servers", &config.brokers)
            .set("client.id", &config.client_id)
            .set("message.timeout.ms", "5000")
            .create()?;
        Ok(Self {
            config,
            producer,
            rx,
            count: 0,
        })
    }

    fn run(&mut self) {
        // Get a peekable iterator from the incoming channel. This allows us to
        // look at the next message in the channel without removing it; we only
        // remove it once it has been handed to the producer without error.
        //
        // Not sure how this will work with pipelining, though; we will probably
        // have to do some buffering here, or just accept that any log records
        // in flight will be lost.
        let mut iter = self.rx.iter().peekable();
        loop {
            if let Some(buf) = iter.peek() {
                self.count += 1;
                if let Err(err) = self.producer.send_result(
                    FutureRecord::to(&self.config.topic)
                        .key("")
                        .payload(&buf),
                ) {
                    SCLogError!("Failed to send event to Kafka: {:?}", err);
                    break;
                } else {
                    // Successfully sent. Pop it off the channel.
                    let _ = iter.next();
                }
            } else {
                break;
            }
        }
        SCLogNotice!("Producer finished: count={}", self.count);
    }
}

struct Context {
    tx: std::sync::mpsc::SyncSender<String>,
    count: usize,
    dropped: usize,
}

unsafe extern "C" fn output_open(conf: *const c_void, init_data: *mut *mut c_void) -> c_int {
    // Load configuration.
    let config = ProducerConfig::new(&ConfNode::wrap(conf)).unwrap();
    let (tx, rx) = std::sync::mpsc::sync_channel(config.buffer);
    let mut kafka_producer = match KafkaProducer::new(config, rx) {
        Ok(producer) => {
            SCLogNotice!(
                "Kafka producer initialized successfully with brokers: {:?} | topic: {:?} | client_id: {:?} | buffer-size: {:?}",
                producer.config.brokers,
                producer.config.topic,
                producer.config.client_id,
                producer.config.buffer
            );
            producer
        }
        Err(err) => {
            SCLogError!("Failed to initialize Kafka producer: {:?}", err);
            panic!()
        }
    };
    let context = Context {
        tx,
        count: 0,
        dropped: 0,
    };
    std::thread::spawn(move || kafka_producer.run());
    *init_data = Box::into_raw(Box::new(context)) as *mut _;
    0
}

unsafe extern "C" fn output_close(init_data: *const c_void) {
    let context = Box::from_raw(init_data as *mut Context);
    SCLogNotice!(
        "Kafka produce finished: count={}, dropped={}",
        context.count,
        context.dropped
    );
    std::mem::drop(context);
}

unsafe extern "C" fn output_write(
    buffer: *const c_char,
    buffer_len: c_int,
    init_data: *const c_void,
) -> c_int {
    let context = &mut *(init_data as *mut Context);
    let buf = if let Ok(buf) = ffi::str_from_c_parts(buffer, buffer_len) {
        buf
    } else {
        return -1;
    };
    context.count += 1;
    if let Err(err) = context.tx.try_send(buf.to_string()) {
        context.dropped += 1;
        match err {
            TrySendError::Full(_) => {
                SCLogError!("Eve record lost due to full buffer");
            }
            TrySendError::Disconnected(_) => {
                SCLogError!("Eve record lost due to broken channel: {}", err);
            }
        }
    }
    0
}

unsafe extern "C" fn init_plugin() {
    let file_type =
        ffi::SCPluginFileType::new("kafka", output_open, output_close, output_write);
    ffi::SCPluginRegisterFileType(file_type);
}

#[no_mangle]
extern "C" fn SCPluginRegister() -> *const ffi::SCPlugin {
    // Rust plugins need to initialize some Suricata internals so stuff like logging works.
    suricata::plugin::init();
    // Register our plugin.
    ffi::SCPlugin::new("Kafka Eve Filetype", "GPL-2.0", "1z3r0", init_plugin)
}

What's wrong with tokio unix socket server/client?

I have a server that broadcasts messages to connected clients, but the messages don't get delivered and my test fails.
I'm using the following:
use anyhow::Result;
use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::broadcast::*;
use tokio::sync::Notify;
use tokio::task::JoinHandle;
This is how I start and set up my server:
pub struct Server {
    #[allow(dead_code)]
    tx: Sender<String>,
    rx: Receiver<String>,
    address: Arc<PathBuf>,
    handle: Option<JoinHandle<Result<()>>>,
    abort: Arc<Notify>,
}

impl Server {
    pub fn new<P: AsRef<Path>>(address: P) -> Self {
        let (tx, rx) = channel::<String>(400);
        let address = Arc::new(address.as_ref().to_path_buf());
        Self {
            address,
            handle: None,
            tx,
            rx,
            abort: Arc::new(Notify::new()),
        }
    }
}
/// Start Server
pub async fn start(server: &mut Server) -> Result<()> {
    tokio::fs::remove_file(server.address.as_path()).await.ok();
    let listener = UnixListener::bind(server.address.as_path())?;
    println!("[Server] Started");
    let tx = server.tx.clone();
    let abort = server.abort.clone();
    server.handle = Some(tokio::spawn(async move {
        loop {
            let tx = tx.clone();
            let abort1 = abort.clone();
            tokio::select! {
                _ = abort.notified() => break,
                Ok((client, _)) = listener.accept() => {
                    tokio::spawn(async move { handle(client, tx, abort1).await });
                }
            }
        }
        println!("[Server] Aborted!");
        Ok(())
    }));
    Ok(())
}
My handle function:
/// Handle stream
async fn handle(mut stream: UnixStream, tx: Sender<String>, abort: Arc<Notify>) {
    loop {
        let mut rx = tx.subscribe();
        let abort = abort.clone();
        tokio::select! {
            _ = abort.notified() => break,
            result = rx.recv() => match result {
                Ok(output) => {
                    stream.write_all(output.as_bytes()).await.unwrap();
                    stream.write(b"\n").await.unwrap();
                    continue;
                }
                Err(e) => {
                    println!("[Server] {e}");
                    break;
                }
            }
        }
    }
    stream.write(b"").await.unwrap();
    stream.flush().await.unwrap();
}
My connect function:
/// Connect to server
async fn connect(address: Arc<PathBuf>, name: String) -> Vec<String> {
    use tokio::io::{AsyncBufReadExt, BufReader};
    let mut outputs = vec![];
    let stream = UnixStream::connect(&*address).await.unwrap();
    let mut breader = BufReader::new(stream);
    let mut buf = vec![];
    loop {
        if let Ok(len) = breader.read_until(b'\n', &mut buf).await {
            if len == 0 {
                break;
            } else {
                let value = String::from_utf8(buf.clone()).unwrap();
                print!("[{name}] {value}");
                outputs.push(value)
            }
            buf.clear();
        }
    }
    println!("[{name}] ENDED");
    outputs
}
This is what I feed to the channel and want broadcast to all clients:
/// Feed data
pub fn feed(tx: Sender<String>, abort: Arc<Notify>) -> Result<JoinHandle<Result<()>>> {
    use tokio::io::*;
    use tokio::process::Command;
    Ok(tokio::spawn(async move {
        let mut child = Command::new("echo")
            .args(&["1\n", "2\n", "3\n", "4\n"])
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .stdin(Stdio::null())
            .spawn()?;
        let mut stdout = BufReader::new(child.stdout.take().unwrap()).lines();
        loop {
            let sender = tx.clone();
            tokio::select! {
                result = stdout.next_line() => match result {
                    Err(e) => {
                        println!("[Server] FAILED to send an output to channel: {e}");
                    },
                    Ok(None) => break,
                    Ok(Some(output)) => {
                        let output = output.trim().to_string();
                        println!("[Server] {output}");
                        if !output.is_empty() {
                            if let Err(e) = sender.send(output) {
                                println!("[Server] FAILED to send an output to channel: {e}");
                            }
                        }
                    }
                }
            }
        }
        println!("[Server] Process Completed");
        abort.notify_waiters();
        Ok(())
    }))
}
My failing test:
#[tokio::test]
async fn test_server() -> Result<()> {
    let mut server = Server::new("/tmp/testsock.socket");
    start(&mut server).await?;
    feed(server.tx.clone(), server.abort.clone()).unwrap();
    let address = server.address.clone();
    let client1 = connect(address.clone(), "Alpha".into());
    let client2 = connect(address.clone(), "Beta".into());
    let client3 = connect(address.clone(), "Delta".into());
    let client4 = connect(address.clone(), "Gamma".into());
    let (c1, c2, c3, c4) = tokio::join!(client1, client2, client3, client4);
    server.handle.unwrap().abort();
    assert_eq!(c1.len(), 4, "Alpha");
    assert_eq!(c2.len(), 4, "Beta");
    assert_eq!(c3.len(), 4, "Delta");
    assert_eq!(c4.len(), 4, "Gamma");
    println!("ENDED");
    Ok(())
}
Logs:
[Server] Started
[Server] 1
[Server] 2
[Server] 3
[Server] 4
[Server]
[Delta] 1
[Gamma] 1
[Alpha] 1
[Beta] 1
[Server] Process Completed
[Server] Aborted!
[Gamma] ENDED
[Alpha] ENDED
[Beta] ENDED
[Delta] ENDED
Well, not an answer, but I just want to suggest using task::spawn to return a JoinHandle from the function; your handle could then be:
fn handle(mut stream: UnixStream, tx: Sender<String>, abort: Arc<Notify>) -> JoinHandle<()> {
    // Subscribe once, before the loop, so no broadcast messages are missed.
    let mut rx = tx.subscribe();
    let abort = abort.clone();
    task::spawn(async move {
        loop {
            tokio::select! {
                _ = abort.notified() => break,
                result = rx.recv() => match result {
                    Ok(output) => {
                        stream.write_all(output.as_bytes()).await.unwrap();
                        stream.write(b"\n").await.unwrap();
                        continue;
                    }
                    Err(e) => {
                        println!("[Server] {e}");
                        break;
                    }
                }
            }
        }
        stream.write(b"").await.unwrap();
        stream.flush().await.unwrap();
    })
}
I haven't tested this, but I see some duplication in the code above: two loops, two select!s, and the abort check done twice. Note also that the original handle calls tx.subscribe() inside the loop; a broadcast receiver only sees messages sent after subscribe(), so re-subscribing on every iteration silently drops whatever was broadcast in the meantime, which would explain each client receiving only "1". Subscribing once, before the loop, avoids that.

How to use an actix field stream from two consumers?

I have an actix-web service and would like to parse the contents of a multipart field while streaming with async-gcode, and in addition store the contents, e.g. in a database.
However, I have no clue how to feed the stream into the Parser and at the same time collect the bytes into a Vec<u8> or a String.
The first problem I face is that field is a stream of actix::web::Bytes and not of u8.
#[post("/upload")]
pub async fn upload_job(
mut payload: Multipart,
) -> Result<HttpResponse, Error> {
let mut contents : Vec<u8> = Vec::new();
while let Ok(Some(mut field)) = payload.try_next().await {
let content_disp = field.content_disposition().unwrap();
match content_disp.get_name().unwrap() {
"file" => {
while let Some(chunk) = field.next().await {
contents.append(&mut chunk.unwrap().to_vec());
// already parse the contents
// and additionally store contents somewhere
}
}
_ => (),
}
}
Ok(HttpResponse::Ok().finish())
}
Any hint or suggestion is very much appreciated.
One option is to wrap field in a struct and implement the Stream trait for it.
use actix_web::{HttpRequest, HttpResponse, Error};
use futures_util::stream::Stream;
use std::pin::Pin;
use actix_multipart::{Multipart, Field};
use futures::stream::{self, StreamExt};
use futures_util::TryStreamExt;
use std::task::{Context, Poll};
use async_gcode::{Parser, Error as PError};
use bytes::BytesMut;
use std::cell::RefCell;

pub struct Wrapper {
    field: Field,
    buffer: RefCell<BytesMut>,
    index: usize,
}

impl Wrapper {
    pub fn new(field: Field, buffer: RefCell<BytesMut>) -> Self {
        buffer.borrow_mut().truncate(0);
        Wrapper {
            field,
            buffer,
            index: 0,
        }
    }
}

impl Stream for Wrapper {
    type Item = Result<u8, PError>;

    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<u8, PError>>> {
        // Refill the buffer from the field once all buffered bytes are consumed.
        if self.index == self.buffer.borrow().len() {
            match Pin::new(&mut self.field).poll_next(cx) {
                Poll::Ready(Some(Ok(chunk))) => self.buffer.get_mut().extend_from_slice(&chunk),
                Poll::Pending => return Poll::Pending,
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Ready(Some(Err(_))) => {
                    return Poll::Ready(Some(Err(PError::BadNumberFormat /* ??? */)))
                }
            };
        }
        // Hand out the next buffered byte.
        let b = self.buffer.borrow()[self.index];
        self.index += 1;
        Poll::Ready(Some(Ok(b)))
    }
}

#[post("/upload")]
pub async fn upload_job(
    mut payload: Multipart,
) -> Result<HttpResponse, Error> {
    while let Ok(Some(field)) = payload.try_next().await {
        let content_disp = field.content_disposition().unwrap();
        match content_disp.get_name().unwrap() {
            "file" => {
                let mut contents: RefCell<BytesMut> = RefCell::new(BytesMut::new());
                // Note: cloning a RefCell clones its contents, so the Wrapper
                // gets its own buffer, independent of `contents`.
                let w = Wrapper::new(field, contents.clone());
                let mut p = Parser::new(w);
                while let Some(res) = p.next().await {
                    // Do something with results
                }
                // Do something with the buffer
                let a = contents.get_mut()[0];
            }
            _ => (),
        }
    }
    Ok(HttpResponse::Ok().finish())
}
Copying the Bytes from the Field won't be necessary once Bytes::try_unsplit is implemented (https://github.com/tokio-rs/bytes/issues/287).
The answer from dmitryvm (thanks for your effort) showed me that there are actually two problems: first, flattening the Bytes into u8s, and second, "splitting" the stream into a buffer for later storage and into the async-gcode parser.
This is how I solved it:
#[post("/upload")]
pub async fn upload_job(
mut payload: Multipart,
) -> Result<HttpResponse, Error> {
let mut contents : Vec<u8> = Vec::new();
while let Ok(Some(mut field)) = payload.try_next().await {
let content_disp = field.content_disposition().unwrap();
match content_disp.get_name().unwrap() {
"file" => {
let field_stream = field
.map_err(|_| async_gcode::Error::BadNumberFormat) // Translate error
.map_ok(|y| { // Translate Bytes into stream with Vec<u8>
contents.extend_from_slice(&y); // Copy and store for later usage
stream::iter(y).map(Result::<_, async_gcode::Error>::Ok)
})
.try_flatten(); // Flatten the streams of u8's
let mut parser = Parser::new(field_stream);
while let Some(gcode) = parser.next().await {
// Process result from parser
}
}
_ => (),
}
}
Ok(HttpResponse::Ok().finish())
}

"blocking annotated I/O must be called from the context of the Tokio runtime" when reading stdin in async task

I'm trying to read from stdin in an async task, spawned with tokio::spawn. The executor is created as
let mut executor = tokio::runtime::current_thread::Runtime::new().unwrap();
The main task is then run with executor.spawn(...), which spawns other tasks with tokio::spawn().
fn main then calls executor.run().unwrap(); to wait for all tasks to finish.
The problem is when I do
let mut stdin = tokio::io::stdin();
let mut read_buf: [u8; 1024] = [0; 1024];
...
stdin.read(&mut read_buf).await
I get "blocking annotated I/O must be called from the context of the Tokio runtime" error.
Dependencies:
futures-preview = { version = "0.3.0-alpha.18", features = ["async-await", "nightly"] }
tokio = "0.2.0-alpha.2"
tokio-net = "0.2.0-alpha.2"
tokio-sync = "0.2.0-alpha.2"
Full code:
extern crate futures;
extern crate tokio;
extern crate tokio_net;
extern crate tokio_sync;

use std::io::Write;
use std::net::SocketAddr;
use tokio::io::AsyncReadExt;
use tokio::net::tcp::split::{TcpStreamReadHalf, TcpStreamWriteHalf};
use tokio::net::TcpListener;
use tokio_sync::oneshot;
use futures::select;
use futures::future::FutureExt;

#[derive(Debug)]
enum AppErr {
    CantBindAddr(std::io::Error),
    CantAccept(std::io::Error),
}

fn main() {
    let mut executor = tokio::runtime::current_thread::Runtime::new().unwrap();
    executor.spawn(async {
        match server_task().await {
            Ok(()) => {}
            Err(err) => {
                println!("Error: {:?}", err);
            }
        }
    });
    executor.run().unwrap(); // ignores RunError
}

async fn server_task() -> Result<(), AppErr> {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    let mut listener = TcpListener::bind(&addr).map_err(AppErr::CantBindAddr)?;
    loop {
        print!("Waiting for incoming connection...");
        let _ = std::io::stdout().flush();
        let (socket, _) = listener.accept().await.map_err(AppErr::CantAccept)?;
        println!("{:?} connected.", socket);

        let (read, write) = socket.split();
        let (abort_in_task_snd, abort_in_task_rcv) = oneshot::channel();
        let (abort_out_task_snd, abort_out_task_rcv) = oneshot::channel();

        tokio::spawn(handle_incoming(read, abort_in_task_rcv, abort_out_task_snd));
        tokio::spawn(handle_outgoing(
            write,
            abort_out_task_rcv,
            abort_in_task_snd,
        ));
    }
}

async fn handle_incoming(
    mut conn: TcpStreamReadHalf,
    abort_in: oneshot::Receiver<()>,
    abort_out: oneshot::Sender<()>,
) {
    println!("handle_incoming");
    let mut read_buf: [u8; 1024] = [0; 1024];
    let mut abort_in_fused = abort_in.fuse();
    loop {
        select! {
            abort_ret = abort_in_fused => {
                // TODO match abort_ret {..}
                println!("abort signalled, handle_incoming returning");
                return;
            },
            bytes = conn.read(&mut read_buf).fuse() => {
                match bytes {
                    Err(io_err) => {
                        println!("io error when reading input stream: {:?}", io_err);
                        return;
                    }
                    Ok(bytes) => {
                        println!("read {} bytes: {:?}", bytes, &read_buf[0..bytes]);
                    }
                }
            }
        }
    }
}

async fn handle_outgoing(
    conn: TcpStreamWriteHalf,
    abort_in: oneshot::Receiver<()>,
    abort_out: oneshot::Sender<()>,
) {
    println!("handle_outgoing");
    let mut stdin = tokio::io::stdin();
    let mut read_buf: [u8; 1024] = [0; 1024];
    let mut abort_in_fused = abort_in.fuse();
    loop {
        select! {
            abort_ret = abort_in_fused => {
                println!("abort signalled, handle_outgoing returning");
                return;
            }
            input = stdin.read(&mut read_buf).fuse() => {
                match input {
                    Err(io_err) => {
                        println!("io error when reading stdin: {:?}", io_err);
                        return;
                    }
                    Ok(bytes) => {
                        println!("handle_outgoing read {} bytes", bytes);
                        // TODO
                    }
                }
            },
        }
    }
}
Questions:
Am I doing task spawning right? Can I safely do tokio::spawn in the main task passed to executor.spawn()?
What's wrong with the way I read stdin in this program?
Thanks
Tokio's stdin blocks the enclosing thread from the executor's pool because it is annotated with blocking from tokio-executor. From the reference:
When the blocking function enters, it hands off the responsibility
of processing the current work queue to another thread.
Your code is not working because the executor you used multiplexes tasks on a single thread (tokio::runtime::current_thread::Runtime::new()), so no other thread remains to pick up the work that blocking hands off.
If you configure your runtime properly (a thread pool with multiple threads), your code will work fine:
fn main() {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let mut executor = rt.executor();
    executor.spawn(async {
        match server_task().await {
            Ok(()) => {}
            Err(err) => {
                println!("Error: {:?}", err);
            }
        }
    });
    rt.shutdown_on_idle();
}
See also: How can I stop reading from a tokio::io::lines stream?

serial-rs multiple bluetooth connections

Using serial-rs it's possible to open a Bluetooth connection between my Mac and an Arduino (HC-05). But if I open multiple Bluetooth connections at the same time, only the most recent connection stays open.
I am not completely sure how Qt handles this, but it's possible to read from/write to multiple devices at the same time using QSerialPort.
Is this an unimplemented serial-rs feature, or does Qt do something like switching connections (so only one is open at a time) and it only looks like multiple connections are handled?
extern crate serial;
#[macro_use]
extern crate text_io;

use std::process::Command;
use std::io;
use std::time::Duration;
use std::sync::mpsc::{Sender, Receiver};
use std::sync::mpsc;
use std::thread;
use std::io::prelude::*;
use serial::prelude::*;
use std::sync::Arc;
use std::sync::Mutex;

fn print_available_ports() {
    let _status = Command::new("ls")
        .arg("/dev/")
        .status()
        .unwrap_or_else(|e| panic!("Failed to execute process: {}", e));
}

fn print_available_commands() {
    println!("Available commands:");
    println!(" print_ports - prints available serial ports");
    println!(" connect - make an asynchronous connection to port");
    println!(" start - signal ports to start collecting data");
    println!(" stop - opposite to start");
    println!(" monitor - print info of current state of reading");
    println!(" help - prints this info.");
    println!(" exit");
}

fn connect_to_port(portname: &String,
                   rate: usize,
                   tx: Sender<String>,
                   port_state: Arc<Mutex<bool>>)
                   -> io::Result<()> {
    let mut port = serial::open(portname.trim()).unwrap();
    try!(port.reconfigure(&|settings| {
        try!(settings.set_baud_rate(serial::BaudRate::from_speed(rate)));
        settings.set_char_size(serial::Bits8);
        settings.set_parity(serial::ParityNone);
        settings.set_stop_bits(serial::Stop1);
        settings.set_flow_control(serial::FlowNone);
        Ok(())
    }));
    try!(port.set_timeout(Duration::from_millis(10000)));
    println!("Serial port to {} opened successfully.", portname);
    println!("Waiting for the start..");
    // Busy-wait until the shared flag is flipped by the "start" command.
    while *(port_state.lock().unwrap()) != true {}
    println!("Port named {} started reading.", portname);
    let mut ans_number: usize = 0;
    let mut bytes_received: usize = 0;
    let mut buf = vec![0; 128];
    loop {
        match port.read(&mut buf[..]) {
            Ok(n) => {
                bytes_received += n;
            }
            Err(_) => {
                println!("Error in reading from {}", portname);
            }
        }
        if bytes_received > 10000 {
            let answer = format!("#{} Port {} received 10000 bytes of data",
                                 ans_number,
                                 portname);
            let _ = tx.send(answer);
            bytes_received = 0;
            ans_number += 1;
        }
        if *(port_state.lock().unwrap()) == false {
            println!("Port named {} got signal to stop. Abort.", portname);
            break;
        }
    }
    Ok(())
}

fn main() {
    print_available_commands();
    let reading_active = Arc::new(Mutex::new(false));
    let (dtx, drx): (Sender<String>, Receiver<String>) = mpsc::channel();
    let mut ports = vec![];
    loop {
        let mut input = String::new();
        match io::stdin().read_line(&mut input) {
            Ok(_) => println!("Command received: {}", input.trim()),
            Err(error) => println!("error: {}", error),
        }
        match input.trim() {
            "connect" => {
                println!("Enter port name:");
                let portname: String = read!();
                println!("Enter baudrate:");
                let baudrate: usize = read!();
                let thread_state = reading_active.clone();
                let thread_tx = dtx.clone();
                ports.push(thread::Builder::new().name(portname.clone()).spawn(move || {
                    let _ = connect_to_port(&portname, baudrate, thread_tx, thread_state);
                }));
            }
            "start" => {
                *(reading_active.lock().unwrap()) = true;
            }
            "stop" => {
                *(reading_active.lock().unwrap()) = false;
            }
            "help" => print_available_commands(),
            "print_ports" => print_available_ports(),
            "exit" => {
                println!("Closing used ports..");
            }
            "monitor" => {
                // Note: this inner loop never returns to the command prompt.
                loop {
                    println!("{:?}", drx.recv());
                }
            }
            _ => println!("Unsupported command."),
        }
    }
}
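No answer here yet, but a data point: as far as I can tell, serial-rs opens each device independently (essentially a plain open() on the device file), so the library itself shouldn't limit you to one connection; if only the most recent HC-05 link stays alive, the macOS Bluetooth SPP layer is the more likely culprit. As a sanity check that serial-rs can service several ports at once, here is an untested sketch that polls all opened ports from a single thread using short timeouts, instead of one thread per port:

use std::io::Read;
use std::time::Duration;
use serial::prelude::*;

fn poll_ports(portnames: &[&str]) {
    let mut ports = Vec::new();
    for name in portnames {
        let mut port = serial::open(name).unwrap();
        // Short timeout so one silent port cannot stall the whole loop.
        port.set_timeout(Duration::from_millis(10)).unwrap();
        ports.push(port);
    }
    let mut buf = [0u8; 128];
    loop {
        for (i, port) in ports.iter_mut().enumerate() {
            match port.read(&mut buf) {
                Ok(n) => println!("port {}: read {} bytes", i, n),
                Err(_) => {} // a timeout surfaces as an error; move on to the next port
            }
        }
    }
}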
