// src/server.rs
use axum::{
extract::Path,
response::{IntoResponse, Response},
routing::get,
};
pub struct Server {}
impl Server {
pub async fn run() -> Result<(), Box<dyn std::error::Error>> {
let axum_http_make_service = axum::Router::new()
.route("/:sec", get(wait_sec_event))
.into_make_service();
let http_server =
axum::Server::bind(&"0.0.0.0:4000".parse().unwrap()).serve(axum_http_make_service);
let http_handle = tokio::spawn(http_server);
let _ = tokio::try_join!(http_handle)?;
Ok(())
}
}
async fn wait_sec_event(Path(sec): Path<String>) -> Response {
let a = std::time::Duration::from_secs(sec.parse::<u64>().unwrap());
std::thread::sleep(a);
"yo".into_response()
}
// src/app.rs
use std::net::SocketAddr;
use crate::server;
pub struct App {
port: SocketAddr,
}
impl App {
pub fn new(p: SocketAddr) -> Self {
Self { port: p }
}
pub async fn run(self) -> Result<(), Box<dyn std::error::Error>> {
server::Server::run().await
}
}
// src/main.rs
use std::net::SocketAddr;
use app::App;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// build our application with a single route
let app = App::new(SocketAddr::from(([0, 0, 0, 0], 4000)));
app.run().await
}
pub mod app;
pub mod server;
When I tried to implement an axum server, I found that if I put axum::Server::bind(&"0.0.0.0:4000".parse().unwrap()).serve(axum_http_make_service) into tokio::spawn instead of just calling await.unwrap() on it,
the server can't accept requests in parallel.
That means if I do curl 127.0.0.1:4000/10 and then curl 127.0.0.1:4000/3,
the second request won't execute until the first one is finished. This doesn't happen if I just await.unwrap() it.
Any idea where I might have made a mistake?
You use std::thread::sleep, which blocks the thread. You shouldn't do that in an async environment because it can prevent other tasks on the same thread from running, as you experienced.
Use tokio::time::sleep instead:
async fn wait_sec_event(Path(sec): Path<String>) -> Response {
let a = std::time::Duration::from_secs(sec.parse::<u64>().unwrap());
tokio::time::sleep(a).await;
"yo".into_response()
}
I believe the difference in behaviour is that, more or less by chance, the handler tasks end up on different threads in your directly-awaiting scenario, while they end up on the same thread when you use tokio::spawn.
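If the blocking call genuinely can't be replaced (for example, it wraps a synchronous library), another option is to move it off the async worker threads. A minimal sketch of the same handler using tokio::task::spawn_blocking, assuming the imports from src/server.rs above:
async fn wait_sec_event(Path(sec): Path<String>) -> Response {
    let a = std::time::Duration::from_secs(sec.parse::<u64>().unwrap());
    // spawn_blocking runs the closure on tokio's dedicated blocking thread pool,
    // so the async worker threads stay free to serve other requests.
    tokio::task::spawn_blocking(move || std::thread::sleep(a))
        .await
        .unwrap();
    "yo".into_response()
}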
I have the following setup:
use core::task::Poll;
use tokio::io::ReadBuf;
use core::task::Context;
use core::pin::Pin;
use std::error::Error;
use tokio::io::AsyncRead;
struct Dummy;
impl AsyncRead for Dummy {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<tokio::io::Result<()>> {
Poll::Pending
}
}
fn request_peers() -> impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>> {
futures::stream::iter((0..10).map(move |i| {
futures::future::ok(Dummy{})
}))
}
async fn connect (
peers: impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>>
) -> impl futures::stream::Stream<Item = impl tokio::io::AsyncRead> {
todo!()
}
#[tokio::main]
async fn main() {
let peers = request_peers();
let connected_peers = connect(peers).await;
}
playground link
I want to connect all peers by awaiting a future, ignoring the peers that do not connect. Ideally, I would like to keep the peers in a futures::stream::Stream. I thought that the following might work:
use core::task::Poll;
use tokio::io::ReadBuf;
use core::task::Context;
use core::pin::Pin;
use std::error::Error;
use tokio::io::AsyncRead;
struct Dummy;
impl AsyncRead for Dummy {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<tokio::io::Result<()>> {
Poll::Pending
}
}
fn request_peers() -> impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>> {
futures::stream::iter((0..10).map(move |i| {
println!("instantiated");
futures::future::ok(Dummy{})
}))
}
use futures::{StreamExt};
fn connect (
peers: impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>>
) -> impl futures::stream::Stream<Item = impl tokio::io::AsyncRead> {
peers.filter_map(|peer_fut| async move {
if let Ok(peer) = peer_fut.await {
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
println!("connected");
Some(peer)
} else {
None
}
})
}
#[tokio::main]
async fn main() {
let peers = request_peers();
let connected_peers = connect(peers);
connected_peers.for_each_concurrent(None, |peer| async {
println!("processed")
}).await;
}
playground link
But the peers are not connected concurrently, so this will take 10 seconds to finish - instead of ~1 sec.
I noticed that if I return a Vec instead of a futures::stream::Stream, it will connect the peers concurrently, with the following code snippet:
use futures::{StreamExt};
async fn connect (
peers: impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>>
) -> Vec<impl tokio::io::AsyncRead> {
let mut peers = peers.map(|peer_fut| async move {
if let Ok(peer) = peer_fut.await {
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
println!("connected");
Some(peer)
} else {
None
}
})
.buffer_unordered(50)
.collect::<Vec<_>>().await;
peers.into_iter().flatten().collect()
}
#[tokio::main]
async fn main() {
let peers = request_peers();
let connected_peers = connect(peers).await;
futures::stream::iter(connected_peers).for_each_concurrent(None, |peer| async {
println!("processed")
}).await;
}
playground link
Is there a way to do this without converting to a Vec, instead keeping the futures::stream::Stream?
This sounds like a good use case for FuturesUnordered.
You create a number of futures (e.g. by running map and collect), then put them into a FuturesUnordered, which is itself a Stream that asynchronously yields the result of whichever future completes first.
If any future returns an error result, it can be skipped or handled appropriately.
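For the Stream-in, Stream-out shape of connect from the question, the same idea works without collecting into a Vec: buffer_unordered (which drives a FuturesUnordered internally) polls up to N of the mapped futures at once and yields each peer as soon as it connects. A sketch, assuming the request_peers/Dummy types from the question:
use futures::StreamExt;
fn connect(
    peers: impl futures::stream::Stream<Item = impl futures::Future<Output = tokio::io::Result<impl tokio::io::AsyncRead>>>,
) -> impl futures::stream::Stream<Item = impl tokio::io::AsyncRead> {
    peers
        // Turn each pending connection into a future that yields Some(peer) on success.
        .map(|peer_fut| async move {
            match peer_fut.await {
                Ok(peer) => {
                    tokio::time::sleep(core::time::Duration::from_secs(1)).await;
                    println!("connected");
                    Some(peer)
                }
                Err(_) => None,
            }
        })
        // Poll up to 50 connection futures concurrently, yielding peers as they finish.
        .buffer_unordered(50)
        // Drop the peers that failed to connect.
        .filter_map(|maybe_peer| async move { maybe_peer })
}
The main from the question can then call for_each_concurrent on the returned stream exactly as before.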
So I'm getting a Response from the reqwest crate and passing it to an HttpResponseBuilder from the actix_web crate. However, I've tried and failed to understand how to implement the Stream trait from the futures crate on a custom struct to act as a middleman and copy the contents down to a file.
This is what I've tried so far, but I'm not sure what to put inside that poll_next function to make it all work.
struct FileCache {
stream: Box<dyn futures::Stream<Item = reqwest::Result<bytes::Bytes>>>,
}
impl FileCache {
fn new(stream: Box<dyn futures::Stream<Item = reqwest::Result<bytes::Bytes>>>) -> Self {
FileCache { stream }
}
}
impl Stream for FileCache {
type Item = reqwest::Result<bytes::Bytes>;
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
}
}
This is possible but requires you to understand what pinning is and how to use it safely.
Basically, we just need to proxy to self.stream.poll_next(), but this method accepts Pin<&mut Self> (as you can see in your own implementation). Storing the box as Pin<Box<T>> instead of Box<T> will give us a way to obtain this Pin relatively easily, without requiring unsafe. Making this change is straightforward, since there is a From implementation allowing conversion of Box<T> to Pin<Box<T>> directly:
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::Stream;
struct FileCache {
stream: Pin<Box<dyn Stream<Item = reqwest::Result<bytes::Bytes>>>>,
}
impl FileCache {
fn new(stream: Box<dyn Stream<Item = reqwest::Result<bytes::Bytes>>>) -> FileCache {
FileCache { stream: stream.into() }
}
}
Now we have to figure out how to go from Pin<&mut FileCache> to Pin<&mut dyn Stream<...>>. The correct incantation here is self.get_mut().stream.as_mut():
impl Stream for FileCache {
type Item = reqwest::Result<bytes::Bytes>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match self.get_mut().stream.as_mut().poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(v) => {
// Do what you need to do with v here.
Poll::Ready(v)
}
}
}
}
The catch is that poll_next isn't async and so you can't asynchronously wait for whatever you're doing with v. bytes::Bytes is atomically-refcounted, though, so you could clone the inner bytes::Bytes value and spawn a separate task on your executor, which is probably what you want to do anyway so that whoever is waiting for FileCache doesn't have to wait for that task to complete before using the data. So you'd do something like:
Poll::Ready(v) => {
if let Some(Ok(ref bytes)) = &v {
let bytes = bytes.clone();
spawn_new_task(async move {
// Do something with bytes
});
}
Poll::Ready(v)
}
Where spawn_new_task() is the function your executor provides, e.g. tokio::spawn().
Now that we can see what we're doing here, we can simplify this down and eliminate the match by pushing Poll::Ready into our pattern, and unconditionally returning whatever the inner poll_next() call did:
impl Stream for FileCache {
type Item = reqwest::Result<bytes::Bytes>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let r = self.get_mut().stream.as_mut().poll_next(cx);
if let Poll::Ready(Some(Ok(ref bytes))) = &r {
let bytes = bytes.clone();
spawn_new_task(async move {
// Do something with bytes
});
}
r
}
}
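As a purely illustrative example of what that spawned task might do for the file-caching use case in the question, here is a sketch assuming a tokio runtime; the helper name spawn_cache_write and the path cache.bin are made up for the example:
use bytes::Bytes;
use tokio::io::AsyncWriteExt;
// Hypothetical helper: append one chunk to the cache file on a separate task,
// so whoever is consuming FileCache never waits on disk I/O.
fn spawn_cache_write(bytes: Bytes) {
    tokio::spawn(async move {
        let file = tokio::fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open("cache.bin") // placeholder path for illustration
            .await;
        if let Ok(mut file) = file {
            // A real cache would handle or log write errors instead of discarding them.
            let _ = file.write_all(&bytes).await;
        }
    });
}
In the poll_next above, spawn_cache_write(bytes.clone()) would take the place of the spawn_new_task call.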
I'm trying to implement an async read wrapper that adds read timeout functionality. The objective is that the API is plain AsyncRead. In other words, I don't want to add io.read(buf).timeout(t) everywhere in the code. Instead, the read instance itself should return the appropriate io::ErrorKind::TimedOut after the given timeout expires.
I can't poll the delay to Ready though. It's always Pending. I've tried with async-std, futures, smol-timeout - the same result. While the timeout does trigger when awaited, it just doesn't when polled. I know timeouts aren't easy. Something needs to wake it up. What am I doing wrong? How to pull this through?
use async_std::{
future::Future,
io,
pin::Pin,
task::{sleep, Context, Poll},
};
use std::time::Duration;
pub struct PrudentIo<IO> {
expired: Option<Pin<Box<dyn Future<Output = ()> + Sync + Send>>>,
timeout: Duration,
io: IO,
}
impl<IO> PrudentIo<IO> {
pub fn new(timeout: Duration, io: IO) -> Self {
PrudentIo {
expired: None,
timeout,
io,
}
}
}
fn delay(t: Duration) -> Option<Pin<Box<dyn Future<Output = ()> + Sync + Send + 'static>>> {
if t.is_zero() {
return None;
}
Some(Box::pin(sleep(t)))
}
impl<IO: io::Read + Unpin> io::Read for PrudentIo<IO> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
if let Some(ref mut expired) = self.expired {
match expired.as_mut().poll(cx) {
Poll::Ready(_) => {
println!("expired ready");
// too much time passed since last read/write
return Poll::Ready(Err(io::ErrorKind::TimedOut.into()));
}
Poll::Pending => {
println!("expired pending");
// in good time
}
}
}
let res = Pin::new(&mut self.io).poll_read(cx, buf);
println!("read {:?}", res);
match res {
Poll::Pending => {
if self.expired.is_none() {
// No data, start checking for a timeout
self.expired = delay(self.timeout);
}
}
Poll::Ready(_) => self.expired = None,
}
res
}
}
impl<IO: io::Write + Unpin> io::Write for PrudentIo<IO> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_close(cx)
}
}
#[cfg(test)]
mod io_tests {
use super::*;
use async_std::io::ReadExt;
use async_std::prelude::FutureExt;
use async_std::{
io::{copy, Cursor},
net::TcpStream,
};
use std::time::Duration;
#[async_std::test]
async fn fail_read_after_timeout() -> io::Result<()> {
let mut output = b"______".to_vec();
let io = PendIo;
let mut io = PrudentIo::new(Duration::from_millis(5), io);
let mut io = Pin::new(&mut io);
insta::assert_debug_snapshot!(io.read(&mut output[..]).timeout(Duration::from_secs(1)).await, @"Ok(io::Err(timeou))");
Ok(())
}
#[async_std::test]
async fn timeout_expires() {
let later = delay(Duration::from_millis(1)).expect("some").await;
insta::assert_debug_snapshot!(later, @r"()");
}
/// Mock IO always pending
struct PendIo;
impl io::Read for PendIo {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &mut [u8],
) -> Poll<futures_io::Result<usize>> {
Poll::Pending
}
}
impl io::Write for PendIo {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
_buf: &[u8],
) -> Poll<futures_io::Result<usize>> {
Poll::Pending
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<futures_io::Result<()>> {
Poll::Pending
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<futures_io::Result<()>> {
Poll::Pending
}
}
}
Async timeouts work as follows:
1. You create the timeout future.
2. The runtime polls the timeout; it checks whether the timeout has expired.
3. If it has expired, it returns Ready and is done.
4. If it has not expired, it registers a callback somewhere so that when the right time has passed, cx.waker().wake() (or something similar) gets called.
5. When the time has passed, the callback from #4 is invoked; it calls wake() on the proper waker, which instructs the runtime to call poll again.
6. This time poll will return Ready. Done!
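To make the waker registration in steps 4-5 concrete, here is a toy future (real runtimes use a timer driver rather than a thread per poll) showing where wake() comes from:
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
    time::Instant,
};

struct NaiveTimeout {
    deadline: Instant,
}

impl Future for NaiveTimeout {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if Instant::now() >= self.deadline {
            return Poll::Ready(()); // step 3: already expired
        }
        // Steps 4-5: arrange for wake() to be called once the deadline has passed,
        // so the runtime knows to poll this future again.
        let waker = cx.waker().clone();
        let deadline = self.deadline;
        std::thread::spawn(move || {
            std::thread::sleep(deadline.saturating_duration_since(Instant::now()));
            waker.wake();
        });
        Poll::Pending
    }
}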
The problem with your code is that you create the delay from inside the poll() implementation: self.expired = delay(self.timeout);. But then you return Pending without polling the timeout even once. This way, there is no callback registered anywhere that would call the Waker. No waker, no timeout.
I see several solutions:
A. Do not initialize PrudentIo::expired to None; create the timeout directly in the constructor. That way the timeout will always be polled before the io at least once, and it will be woken. But you will always create a timeout, even when it is not actually needed.
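A sketch of option A, reusing the delay() helper and the PrudentIo fields from the question (this replaces the original new()):
impl<IO> PrudentIo<IO> {
    pub fn new(timeout: Duration, io: IO) -> Self {
        PrudentIo {
            // Create the timeout eagerly so the first poll_read always polls it
            // (and thus registers a waker), even before any read returns Pending.
            expired: delay(timeout),
            timeout,
            io,
        }
    }
}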
B. When creating the timeout, do a recursive poll:
Poll::Pending => {
if self.expired.is_none() {
// No data, start checking for a timeout
self.expired = delay(self.timeout);
return self.poll_read(cx, buf);
}
This will call the io twice, unnecessarily, so it may not be optimal.
C. Add a call to poll after creating the timeout:
Poll::Pending => {
if self.expired.is_none() {
// No data, start checking for a timeout
self.expired = delay(self.timeout);
self.expired.as_mut().unwrap().as_mut().poll(cx);
}
Maybe you should match the output of poll in case it returns Ready, but hey, it's a brand-new timeout, so it's almost certainly still pending, and it seems to work nicely.
// This is another solution. I think it is better.
impl<IO: io::AsyncRead + Unpin> io::AsyncRead for PrudentIo<IO> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let this = self.get_mut();
let io = Pin::new(&mut this.io);
if let Poll::Ready(res) = io.poll_read(cx, buf) {
return Poll::Ready(res);
}
loop {
if let Some(expired) = this.expired.as_mut() {
ready!(expired.poll(cx));
this.expired.take();
return Poll::Ready(Err(io::ErrorKind::TimedOut.into()));
}
let timeout = Timer::after(this.timeout);
this.expired = Some(timeout);
}
}
}
// 1. smol is used, not async_std.
// 2. IO should be 'static.
// 3. On timeout, poll_read returns Poll::Ready(Err(io::ErrorKind::TimedOut)).
use {
smol::{future::FutureExt, io, ready, Timer},
std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::Duration,
},
};
// --
pub struct PrudentIo<IO> {
expired: Option<Pin<Box<dyn Future<Output = io::Result<usize>>>>>,
timeout: Duration,
io: IO,
}
impl<IO> PrudentIo<IO> {
pub fn new(timeout: Duration, io: IO) -> Self {
PrudentIo {
expired: None,
timeout,
io,
}
}
}
impl<IO: io::AsyncRead + Unpin + 'static> io::AsyncRead for PrudentIo<IO> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let this = self.get_mut();
loop {
if let Some(expired) = this.expired.as_mut() {
let res = ready!(expired.poll(cx))?;
this.expired.take();
return Ok(res).into();
}
let timeout = this.timeout.clone();
let (io, read_buf) = unsafe {
// Safety: ONLY used in poll_read method.
(&mut *(&mut this.io as *mut IO), &mut *(buf as *mut [u8]))
};
let fut = async move {
let timeout_fut = async {
Timer::after(timeout).await;
io::Result::<usize>::Err(io::ErrorKind::TimedOut.into())
};
let read_fut = io::AsyncReadExt::read(io, read_buf);
let res = read_fut.or(timeout_fut).await;
res
}
.boxed_local();
this.expired = Some(fut);
}
}
}
impl<IO: io::AsyncWrite + Unpin> io::AsyncWrite for PrudentIo<IO> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io).poll_write(cx, buf)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_close(cx)
}
}
Problem Description
I have a Config struct that can store a FnMut callback function. The catch is: not all of my configurations require a callback function, so I would like to make adding a callback function optional. This requires the member variable to be initialized with a default function that will get used if no callback is set.
Existing Code
struct Config<'a>{
callback: &'a mut dyn (FnMut(&str))
}
fn default_fn(msg: &str){
println!("default_fn({})", msg);
}
impl<'a> Config<'a> {
pub fn new() -> Config<'a> {
Config{
callback: &default_fn // ERROR: types differ in mutability
}
}
pub fn set_callback(mut self, callback_fn: &'a mut dyn (FnMut(&str))) -> Config<'a> {
self.callback = callback_fn;
self
}
}
fn main() {
// Our FnMut callback
let mut msg_log: Vec<String> = vec![];
let mut callback_fn = |msg: &str| {
msg_log.push(msg.to_string());
};
{
let mut config = Config::new();
(config.callback)("Hello World!");
config = config.set_callback(&mut callback_fn);
(config.callback)("Hello World!");
}
// Demonstration that the callback actually works
println!("{:?}", msg_log);
}
error[E0308]: mismatched types
--> src/main.rs:13:23
|
13 | callback: &default_fn // ERROR: types differ in mutability
| ^^^^^^^^^^^ types differ in mutability
|
= note: expected type `&mut dyn for<'r> std::ops::FnMut(&'r str)`
found type `&for<'r> fn(&'r str) {default_fn}`
Does someone have any suggestions on how to solve that problem?
Things I already tried, without any success:
Initializing it with a closure: callback: &|_: &str|{}
Using a member function instead of a global function
Creating a mutable reference: callback: &mut default_fn
(causes: cannot return value referencing temporary value)
I'm running out of ideas, any help is appreciated. Even if the answer is that what I am trying to do is impossible for reasons I didn't realize yet.
You should really box the trait object function. That makes the whole code much easier to use:
struct Config<'a>{
callback: Box<dyn FnMut(&str) + 'a>,
}
fn default_fn(msg: &str){
println!("default_fn({})", msg);
}
impl<'a> Config<'a> {
pub fn new() -> Config<'a> {
Config{
callback: Box::new(default_fn)
}
}
pub fn set_callback(self, callback: &'a mut dyn (FnMut(&str))) -> Config<'a> {
Config {
callback: Box::new(callback),
..self
}
}
}
fn main() {
// Our FnMut callback
let mut msg_log = vec![];
let mut callback_fn = |msg: &str| {
msg_log.push(msg.to_string());
};
{
let mut config = Config::new();
(config.callback)("Hello World!");
config = config.set_callback(&mut callback_fn);
(config.callback)("Hello World!");
}
// Demonstration that the callback actually works
println!("{:?}", msg_log);
}
Note that it is difficult to use callbacks in idiomatic Rust; I would even say that they aren't idiomatic at all. You should use a channel instead, something like this:
use std::sync::mpsc::{channel, Sender, SendError};
struct Config {
sender: Sender<String>,
}
impl Config {
pub fn new(sender: Sender<String>) -> Config {
Config{
sender
}
}
pub fn send(&self, message: String) -> Result<(), SendError<String>> {
self.sender.send(message)
}
}
fn main() {
let (sender, receiver) = channel();
let config = Config::new(sender);
config.send("Hello world!".into()).unwrap();
println!("{:?}", receiver.recv().unwrap());
}
Just wanted to share the solution I found:
Single-threaded, callback-based.
While in my opinion this one really answers the question I had, I think you guys are still right about the problems I might encounter in the future with this programming style. I will definitely reconsider your advice about using channels.
struct Config<'a>{
callback: Option<&'a mut dyn (FnMut(&str))>
}
impl<'a> Config<'a> {
pub fn new() -> Config<'a> {
Config{
callback: None
}
}
pub fn set_callback(mut self, callback_fn: &'a mut dyn (FnMut(&str))) -> Config<'a> {
self.callback = Some(callback_fn);
self
}
pub fn run_callback(&mut self, msg: &str){
if let Some(callback) = &mut self.callback{
callback(msg);
} else {
// Default code
println!("default_fn({})", msg);
}
}
}
fn main() {
// Our FnMut callback
let mut msg_log: Vec<String> = vec![];
let mut callback_fn = |msg: &str| {
msg_log.push(msg.to_string());
};
let mut config = Config::new();
config.run_callback("Hello World!");
config = config.set_callback(&mut callback_fn);
config.run_callback("Hello World!");
// Demonstration that the callback actually works
println!("{:?}", msg_log);
}