I'm using tokio::runtime::current_thread::Runtime and I want to be able to run a future and stop the reactor on the same thread. The example on the documentation page doesn't show how to stop the runtime. Is there any way I can do that?
The runtime will automatically shut down when the future is complete:
use std::time::Duration;
use tokio::time; // 0.2.21
#[tokio::main]
async fn main() {
time::delay_for(Duration::from_secs(2)).await;
eprintln!("future complete");
}
See How do I synchronously return a value calculated in an asynchronous Future in stable Rust? for other ways of creating a runtime.
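If you want to control the runtime's lifetime yourself instead of using #[tokio::main], you can build a single-threaded runtime and block on the future; the runtime shuts down when the value is dropped. A minimal sketch, assuming the same tokio 0.2 as above:
use std::time::Duration;
use tokio::{runtime, time}; // 0.2.21
fn main() {
    // A single-threaded ("basic") scheduler, the 0.2 equivalent of current_thread.
    let mut rt = runtime::Builder::new()
        .basic_scheduler()
        .enable_all()
        .build()
        .expect("failed to build runtime");
    // block_on drives the future to completion on this thread;
    // dropping `rt` afterwards shuts the runtime down.
    rt.block_on(async {
        time::delay_for(Duration::from_secs(2)).await;
        eprintln!("future complete");
    });
}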
If you need to cancel a future, you can create something that will cause future polls to succeed. I'd probably use channels and select:
use futures::{channel::oneshot, future, FutureExt}; // 0.3.5
use std::time::Duration;
use tokio::{task, time}; // 0.2.21
#[tokio::main]
async fn main() {
let future = async {
time::delay_for(Duration::from_secs(3600)).await;
eprintln!("future complete");
};
let (cancel_tx, cancel_rx) = oneshot::channel();
let another_task = task::spawn(async {
eprintln!("Another task started");
time::delay_for(Duration::from_secs(2)).await;
eprintln!("Another task canceling the future");
cancel_tx.send(()).expect("Unable to cancel");
eprintln!("Another task exiting");
});
future::select(future.boxed(), cancel_rx).await;
another_task.await.expect("The other task panicked");
}
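If you also need to know whether the work finished or was cancelled, select returns an Either carrying the winner's output together with the still-pending loser. A minimal sketch with the same crate versions (the work name and the immediate cancel are mine, purely for illustration):
use futures::{channel::oneshot, future::{self, Either}, FutureExt}; // 0.3.5
use std::time::Duration;
use tokio::time; // 0.2.21
#[tokio::main]
async fn main() {
    let work = async {
        time::delay_for(Duration::from_secs(3600)).await;
    };
    let (cancel_tx, cancel_rx) = oneshot::channel::<()>();
    // Cancel immediately, purely for demonstration.
    cancel_tx.send(()).expect("Unable to cancel");
    match future::select(work.boxed(), cancel_rx).await {
        Either::Left((_, _cancel_rx)) => eprintln!("work finished first"),
        Either::Right((_cancel_result, _work)) => eprintln!("work was cancelled"),
    }
}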
Here's an alternate manual solution that's very simple, brute force, and probably not very performant:
use pin_project::pin_project; // 0.4.17
use std::{
future::Future,
pin::Pin,
sync::{Arc, Mutex},
task::{self, Context, Poll},
thread,
time::Duration,
};
use tokio::time; // 0.2.21
#[tokio::main]
async fn main() {
let future = async {
time::delay_for(Duration::from_secs(3600)).await;
eprintln!("future complete");
};
let (future, cancel) = Cancelable::new(future);
let another_thread = thread::spawn(|| {
eprintln!("Another thread started");
thread::sleep(Duration::from_secs(2));
eprintln!("Another thread canceling the future");
cancel();
eprintln!("Another thread exiting");
});
future.await;
another_thread.join().expect("The other thread panicked");
}
#[pin_project]
#[derive(Debug)]
struct Cancelable<F> {
#[pin]
inner: F,
info: Arc<Mutex<CancelInfo>>,
}
#[derive(Debug, Default)]
struct CancelInfo {
cancelled: bool,
task: Option<task::Waker>,
}
impl<F> Cancelable<F> {
fn new(inner: F) -> (Self, impl FnOnce()) {
let info = Arc::new(Mutex::new(CancelInfo::default()));
let cancel = {
let info = info.clone();
move || {
let mut info = info.lock().unwrap();
info.cancelled = true;
if let Some(waker) = info.task.take() {
waker.wake();
}
}
};
let me = Cancelable { inner, info };
(me, cancel)
}
}
impl<F> Future for Cancelable<F>
where
F: Future<Output = ()>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut info = this.info.lock().unwrap();
if info.cancelled {
Poll::Ready(())
} else {
let r = this.inner.poll(ctx);
if r.is_pending() {
info.task = Some(ctx.waker().clone());
}
r
}
}
}
See also:
When is it safe to move a member value out of a pinned future?
I have a polling function that polls forever and always returns Pending. Inside the polling function poll_event_loop I want to control when the context should be polled again, so that the function is called again in x seconds, depending on some conditions. I could do this with another thread that calls waker.wake_by_ref, but that feels like a cheat. How can I do this without other threads?
poll_fn(|cx| self.poll_event_loop(cx)).await
// The function will never be Ready, always Pending; polling should be fast, so we can't just sleep x seconds inside
fn poll_event_loop(&mut self, cx: &mut Context) -> Poll<anyhow::Result<()>> {
while some_codeandfunc() { /*....*/ }
// guarantee another poll_fn in 1 sec
if condition {
context_callback(cx, 1000);
}
// guarantee another poll_fn in 2 sec
if condition {
context_callback(cx, 2000);
}
Poll::Pending
}
fn context_callback(context: &mut Context, millisec: u64) {
let mut future = Box::pin(tokio::time::sleep(Durationtk::from_millis(millisec)));
//let cb = future.as_mut().poll(context);
future.poll_unpin(context);
}
// ugly way to auto poll the function every x seconds
fn spawn_qeueu_thread(waker: &Waker, rx: &Receiver<String>) -> Option<JoinHandle<()>> {
debug!("doing spawning thread");
//self.thread_spawned = true;
let waker = waker.clone();
let rx2 = rx.clone();
let spawn = tokio::spawn(async move {
loop {
tokio::time::sleep(Durationtk::from_millis(WAKEUPINTERVAL)).await;
debug!("doing other thread wakebyref");
waker.wake_by_ref();
let try_result = rx2.try_recv();
match try_result {
Err(_) => {}
Ok(_msg) => break,
}
}
debug!("ending spawned thread");
});
return Some(spawn);
//self.threadhandle = Some(spawn);
}
After some experimenting, I found a working solution that lets the polling function be called at multiple desired times. Working example:
use chrono::Utc;
use futures::FutureExt;
use futures::future::poll_fn;
use tokio::time::Sleep;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use tokio::time::Duration as Durationtk;
pub struct ControllerModule {
vec: Vec<Pin<Box<Sleep>>>,
i: i64,
}
impl ControllerModule {
fn new() -> Self {
let vec = vec![];
let i = 0;
Self { vec, i }
}
async fn start(&mut self) {
poll_fn(|cx| self.poll_event_loop(cx)).await;
print!("worked");
}
fn poll_event_loop(&mut self, context: &mut Context) -> Poll<anyhow::Result<()>> {
self.i += 1;
if self.i % 3 == 0 {
let mut sleep = Box::pin(tokio::time::sleep(Durationtk::from_millis(5000)));
sleep.poll_unpin(context);
self.vec.push(sleep);
} else if self.i % 3 == 1 {
let mut sleep = Box::pin(tokio::time::sleep(Durationtk::from_millis(4000)));
sleep.poll_unpin(context);
self.vec.push(sleep);
} else {
context.waker().wake_by_ref();
}
self.vec.retain(|e| !e.is_elapsed());
Poll::Pending
}
}
#[tokio::main]
async fn main() {
let mut i = ControllerModule::new();
i.start().await
}
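The core of that solution can be boiled down to this: polling a pinned Sleep registers this task's waker with tokio's timer driver, so the runtime re-polls the function when the deadline fires, and keeping the Sleep alive in self.vec preserves that registration. Here is a minimal self-contained sketch of just that idea, assuming tokio 1.x and futures 0.3 (the Repolls/poll_tick names are mine, not from the code above):
use futures::future::poll_fn; // 0.3
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::time::{sleep, Duration, Sleep};
struct Repolls {
    timers: Vec<Pin<Box<Sleep>>>,
    ticks: u32,
}
impl Repolls {
    fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        self.ticks += 1;
        println!("polled {} times", self.ticks);
        if self.ticks == 5 {
            return Poll::Ready(());
        }
        // Ask to be polled again in roughly one second.
        let mut timer = Box::pin(sleep(Duration::from_secs(1)));
        if timer.as_mut().poll(cx).is_pending() {
            // Keep the timer alive until it elapses; dropping it would lose the wakeup.
            self.timers.push(timer);
        }
        // Throw away timers that have already fired.
        self.timers.retain(|t| !t.is_elapsed());
        Poll::Pending
    }
}
#[tokio::main]
async fn main() {
    let mut state = Repolls { timers: Vec::new(), ticks: 0 };
    poll_fn(|cx| state.poll_tick(cx)).await;
}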
I have the following setup
use futures::{
future,
stream::{self, Stream, FuturesUnordered},
};
use tokio;
fn foo(futures: FuturesUnordered<impl futures::Future<Output = std::io::Result<impl std::fmt::Binary>>>) {}
fn bar(futures: FuturesUnordered<impl futures::Future<Output = impl std::fmt::Binary>>) {}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let mut delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i) // this line can't be changed
}).collect();
// this is ok
foo(futures);
// this will not compile
bar(futures);
}
playground link
I want to be able to call the bar function with futures. Given that I can't change how futures is initialized, how do I ignore the errors in the stream and only process the elements which are not errors?
There is a similar SO question about this here: How can I remove or otherwise ignore errors when processing a stream?
But the answer uses stream::iter_ok, which I think is deprecated or something?
I expected the following to work:
use futures::{
future,
stream::{self, Stream, FuturesUnordered},
StreamExt,
};
use tokio;
fn foo(futures: FuturesUnordered<impl futures::Future<Output = std::io::Result<impl std::fmt::Binary>>>) {}
async fn bar(futures: FuturesUnordered<impl futures::Future<Output = impl std::fmt::Binary>>) {
futures.for_each(|n| {
async move {
println!("Success on {:b}", n);
}
}).await
}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let mut delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i)
}).collect();
let futures = futures
.then(|r| future::ok(iter_ok::<_, ()>(r)))
.flatten();
bar(futures).await;
}
playground link
You can create a stream over the successful values of another stream like so:
use futures::{
stream::{self, Stream, FuturesUnordered},
StreamExt,
};
use tokio;
async fn bar(futures: impl Stream<Item = impl std::fmt::Binary>) {
futures.for_each(|n| {
async move {
println!("Success on {:b}", n);
}
}).await
}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i)
}).collect();
let futures = futures
.then(|r| async { stream::iter(r.into_iter()) })
.flatten();
bar(futures).await;
}
Note: since the type returned by .then() includes the closure and thus can't be named, we have to change the type of the futures parameter in bar().
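An alternative that avoids the intermediate stream of streams is StreamExt::filter_map, which drops the errors in one step (a sketch, not from the playground code, assuming the same futures 0.3). In the main above, the .then()/.flatten() chain could be replaced with:
// filter_map keeps only the Ok values, discarding the errors.
let futures = futures.filter_map(|r| async move { r.ok() });
bar(futures).await;
The resulting type still includes a closure and can't be named, so bar() keeps its impl Stream parameter.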
I am attempting to create a simple event loop with Mio. Every event invokes an associated callback depending on the Token of the event. The event loop runs on a separate thread to the rest of the code.
Events can be registered via the register function; however, registering an event must add it to the same HashMap of callbacks (and access the same Mio Poll struct) that the event loop is iterating over.
The issue is that the callbacks themselves may register events, and in that case it is impossible to take the mutex because the event loop already holds it. Is it possible to somehow drop the Mutex in the start() function while the callback is being invoked? Although that does not seem performant.
Is there a better way to handle this in Rust?
use mio::event::Event;
use mio::net::{TcpListener, TcpStream};
use mio::{event, Events, Interest, Poll, Token};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::{io, thread};
pub trait HookCb: Send {
fn call(&self, event: &Event);
}
impl<F: Send> HookCb for F
where
F: Fn(&Event),
{
fn call(&self, event: &Event) {
self(event)
}
}
struct EventLoopInner {
handlers: HashMap<Token, Box<dyn HookCb>>,
poll: Poll,
}
pub struct EventLoop {
inner: Mutex<EventLoopInner>,
}
impl EventLoop {
pub fn new() -> io::Result<Self> {
Ok(Self {
inner: Mutex::new(EventLoopInner {
handlers: HashMap::new(),
poll: Poll::new()?,
}),
})
}
pub fn start(&self) -> io::Result<()> {
let mut events = Events::with_capacity(1024);
let inner = &mut *self.inner.lock().unwrap(); // Inner mutex taken
loop {
inner.poll.poll(&mut events, None)?;
for event in events.iter() {
match event.token() {
Token(v) => {
if let Some(cb) = inner.handlers.get(&Token(v)) {
// TODO release the inner mutex before here so that the callback can invoke register
cb.call(event)
}
}
}
}
}
}
pub fn register<S>(
&self,
source: &mut S,
interest: Interest,
cb: impl HookCb + 'static,
) -> io::Result<Token>
where
S: event::Source + std::marker::Send,
{
let mut inner = self.inner.lock().unwrap(); // Cannot get this lock after start() has been invoked
let token = Token(inner.handlers.len());
inner.poll.registry().register(source, token, interest)?;
inner.handlers.insert(token, Box::new(cb));
Ok(token)
}
}
struct ServerConn {
listener: Option<TcpListener>,
connections: HashMap<Token, TcpStream>,
}
struct Server {
eventloop: Arc<EventLoop>,
thread: Option<JoinHandle<()>>,
conn: Arc<Mutex<ServerConn>>,
}
impl Server {
pub fn new() -> Self {
Self {
eventloop: Arc::new(EventLoop::new().unwrap()),
thread: None,
conn: Arc::new(Mutex::new(ServerConn {
listener: None,
connections: HashMap::new(),
})),
}
}
pub fn listen(&mut self, addr: &str) {
{
let mut conn = self.conn.lock().unwrap();
conn.listener = Some(TcpListener::bind(addr.parse().unwrap()).unwrap());
let cb_conn = Arc::clone(&self.conn);
let cb_eventloop = Arc::clone(&self.eventloop);
self.eventloop
.register(
conn.listener.as_mut().unwrap(),
Interest::READABLE,
move |e: &Event| {
Self::accept_cb(e, &cb_conn, &cb_eventloop);
},
)
.unwrap();
} // Unlock conn
let t_eventloop = Arc::clone(&self.eventloop);
self.thread = Some(thread::spawn(move || {
t_eventloop.start().unwrap();
}));
self.thread.take().unwrap().join().unwrap(); // Temp fix to block main thread so application does not exit
}
fn accept_cb(_e: &Event, conn: &Arc<Mutex<ServerConn>>, evloop: &Arc<EventLoop>) {
let mut conn_lock = conn.lock().unwrap();
loop {
let (mut stream, addr) = match conn_lock.listener.as_ref().unwrap().accept() {
Ok((stream, addr)) => (stream, addr),
Err(_) => return,
};
println!("Accepted connection from: {}", addr);
// TODO can this clone be avoided?
let cb_conn = Arc::clone(conn);
let cb_evloop = Arc::clone(evloop);
let token = evloop
.register(
&mut stream,
Interest::READABLE.add(Interest::WRITABLE),
move |e: &Event| {
Self::conn_cb(e, &cb_conn, &cb_evloop);
},
)
.unwrap();
conn_lock.connections.insert(token, stream);
}
}
pub fn conn_cb(e: &Event, conn: &Arc<Mutex<ServerConn>>, _evloop: &Arc<EventLoop>) {
let conn_lock = conn.lock().unwrap();
let mut _connection = conn_lock.connections.get(&e.token()).unwrap();
if e.is_writable() {
// TODO write logic -- connection.write(b"Hello World!\n").unwrap();
// TODO evloop.reregister(&mut connection, event.token(), Interest::READABLE)?
}
if e.is_readable() {
// TODO read logic -- connection.read(&mut received_data);
}
}
}
fn main() {
let mut s = Server::new();
s.listen("127.0.0.1:8000");
}
I implemented the future and made a request to the server, but curl blocked and the log shows that poll was only invoked once.
Did I implement anything wrong?
use failure::{format_err, Error};
use futures::{future, Async};
use hyper::rt::Future;
use hyper::service::{service_fn, service_fn_ok};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use log::{debug, error, info};
use std::{
sync::{Arc, Mutex},
task::Waker,
thread,
};
pub struct TimerFuture {
shared_state: Arc<Mutex<SharedState>>,
}
struct SharedState {
completed: bool,
resp: String,
}
impl Future for TimerFuture {
type Item = Response<Body>;
type Error = hyper::Error;
fn poll(&mut self) -> futures::Poll<Response<Body>, hyper::Error> {
let mut shared_state = self.shared_state.lock().unwrap();
if shared_state.completed {
return Ok(Async::Ready(Response::new(Body::from(
shared_state.resp.clone(),
))));
} else {
return Ok(Async::NotReady);
}
}
}
impl TimerFuture {
pub fn new(instance: String) -> Self {
let shared_state = Arc::new(Mutex::new(SharedState {
completed: false,
resp: String::new(),
}));
let thread_shared_state = shared_state.clone();
thread::spawn(move || {
let res = match request_health(instance) {
Ok(status) => status.clone(),
Err(err) => {
error!("{:?}", err);
format!("{}", err)
}
};
let mut shared_state = thread_shared_state.lock().unwrap();
shared_state.completed = true;
shared_state.resp = res;
});
TimerFuture { shared_state }
}
}
fn request_health(instance_name: String) -> Result<String, Error> {
std::thread::sleep(std::time::Duration::from_secs(1));
Ok("health".to_string())
}
type BoxFut = Box<dyn Future<Item = Response<Body>, Error = hyper::Error> + Send>;
fn serve_health(req: Request<Body>) -> BoxFut {
let mut response = Response::new(Body::empty());
let path = req.uri().path().to_owned();
match (req.method(), path) {
(&Method::GET, path) => {
return Box::new(TimerFuture::new(path.clone()));
}
_ => *response.status_mut() = StatusCode::NOT_FOUND,
}
Box::new(future::ok(response))
}
fn main() {
let endpoint_addr = "0.0.0.0:8080";
match std::thread::spawn(move || {
let addr = endpoint_addr.parse().unwrap();
info!("Server is running on {}", addr);
hyper::rt::run(
Server::bind(&addr)
.serve(move || service_fn(serve_health))
.map_err(|e| eprintln!("server error: {}", e)),
);
})
.join()
{
Ok(e) => e,
Err(e) => println!("{:?}", e),
}
}
After compiling and running this code, a server is listening on port 8080. Calling the server with curl blocks:
curl 127.0.0.1:8080/my-health-scope
Did I implement anything wrong?
Yes, you did not read and follow the documentation for the method you are implementing (emphasis mine):
When a future is not ready yet, the Async::NotReady value will be returned. In this situation the future will also register interest of the current task in the value being produced. This is done by calling task::park to retrieve a handle to the current Task. When the future is then ready to make progress (e.g. it should be polled again) the unpark method is called on the Task.
As a minimal, reproducible example, let's use this:
use futures::{future::Future, Async};
use std::{
mem,
sync::{Arc, Mutex},
thread,
time::Duration,
};
pub struct Timer {
data: Arc<Mutex<String>>,
}
impl Timer {
pub fn new(instance: String) -> Self {
let data = Arc::new(Mutex::new(String::new()));
thread::spawn({
let data = data.clone();
move || {
thread::sleep(Duration::from_secs(1));
*data.lock().unwrap() = instance;
}
});
Timer { data }
}
}
impl Future for Timer {
type Item = String;
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
let mut data = self.data.lock().unwrap();
eprintln!("poll was called");
if data.is_empty() {
Ok(Async::NotReady)
} else {
let data = mem::replace(&mut *data, String::new());
Ok(Async::Ready(data))
}
}
}
fn main() {
let v = Timer::new("Some text".into()).wait();
println!("{:?}", v);
}
It only prints out "poll was called" once.
You can call task::current (previously task::park) in the implementation of Future::poll, save the resulting value, then use the value with Task::notify (previously Task::unpark) whenever the future may be polled again:
use futures::{
future::Future,
task::{self, Task},
Async,
};
use std::{
mem,
sync::{Arc, Mutex},
thread,
time::Duration,
};
pub struct Timer {
data: Arc<Mutex<(String, Option<Task>)>>,
}
impl Timer {
pub fn new(instance: String) -> Self {
let data = Arc::new(Mutex::new((String::new(), None)));
let me = Timer { data };
thread::spawn({
let data = me.data.clone();
move || {
thread::sleep(Duration::from_secs(1));
let mut data = data.lock().unwrap();
data.0 = instance;
if let Some(task) = data.1.take() {
task.notify();
}
}
});
me
}
}
impl Future for Timer {
type Item = String;
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
let mut data = self.data.lock().unwrap();
eprintln!("poll was called");
if data.0.is_empty() {
let v = task::current();
data.1 = Some(v);
Ok(Async::NotReady)
} else {
let data = mem::replace(&mut data.0, String::new());
Ok(Async::Ready(data))
}
}
}
fn main() {
let v = Timer::new("Some text".into()).wait();
println!("{:?}", v);
}
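For reference, here's roughly the same idea translated to today's std::future::Future and std::task::Waker APIs (a sketch, not part of the original answer; it uses futures 0.3's block_on to drive the future):
use std::{
    future::Future,
    mem,
    pin::Pin,
    sync::{Arc, Mutex},
    task::{Context, Poll, Waker},
    thread,
    time::Duration,
};
pub struct Timer {
    data: Arc<Mutex<(String, Option<Waker>)>>,
}
impl Timer {
    pub fn new(instance: String) -> Self {
        let data = Arc::new(Mutex::new((String::new(), None)));
        let me = Timer { data };
        thread::spawn({
            let data = me.data.clone();
            move || {
                thread::sleep(Duration::from_secs(1));
                let mut data = data.lock().unwrap();
                data.0 = instance;
                // Wake the task that last polled us, if any.
                if let Some(waker) = data.1.take() {
                    waker.wake();
                }
            }
        });
        me
    }
}
impl Future for Timer {
    type Output = String;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut data = self.data.lock().unwrap();
        eprintln!("poll was called");
        if data.0.is_empty() {
            // Store the waker so the worker thread can request another poll.
            data.1 = Some(cx.waker().clone());
            Poll::Pending
        } else {
            Poll::Ready(mem::take(&mut data.0))
        }
    }
}
fn main() {
    let v = futures::executor::block_on(Timer::new("Some text".into())); // futures 0.3
    println!("{:?}", v);
}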
See also:
Why does Future::select choose the future with a longer sleep period first?
Why is `Future::poll` not called repeatedly after returning `NotReady`?
What is the best approach to encapsulate blocking I/O in future-rs?
I'm trying to make a Stream that would wait until a specific character is in the buffer. I know there's read_until() on BufRead, but I actually need a custom solution, as this is a stepping stone to implementing waiting until a specific string is in the buffer (or, for example, a regexp match happens).
In the project where I first encountered the problem, future processing just hung when I got a Ready(_) from the inner future and returned NotReady from my function. I discovered I shouldn't do that per the docs (last paragraph). However, what I didn't get is what the actual alternative promised in that paragraph is. I read all the published documentation on the Tokio site and it doesn't make sense to me at the moment.
So the following is my current code. Unfortunately I couldn't make it simpler or smaller, as it's already broken. The current result is this:
Err(Custom { kind: Other, error: Error(Shutdown) })
Err(Custom { kind: Other, error: Error(Shutdown) })
Err(Custom { kind: Other, error: Error(Shutdown) })
<ad infinitum>
The expected result is getting some Ok(Ready(_)) out of it, while printing W and W', and waiting for the specific character in the buffer.
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_io_timeout;
extern crate tokio_process;
use futures::stream::poll_fn;
use futures::{Async, Poll, Stream};
use tokio_core::reactor::Core;
use tokio_io::AsyncRead;
use tokio_io_timeout::TimeoutReader;
use tokio_process::CommandExt;
use std::process::{Command, Stdio};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
struct Process {
child: tokio_process::Child,
stdout: Arc<Mutex<tokio_io_timeout::TimeoutReader<tokio_process::ChildStdout>>>,
}
impl Process {
fn new(
command: &str,
reader_timeout: Option<Duration>,
core: &tokio_core::reactor::Core,
) -> Self {
let mut cmd = Command::new(command);
let cat = cmd.stdout(Stdio::piped());
let mut child = cat.spawn_async(&core.handle()).unwrap();
let stdout = child.stdout().take().unwrap();
let mut timeout_reader = TimeoutReader::new(stdout);
timeout_reader.set_timeout(reader_timeout);
let timeout_reader = Arc::new(Mutex::new(timeout_reader));
Self {
child,
stdout: timeout_reader,
}
}
}
fn work() -> Result<(), ()> {
let window = Arc::new(Mutex::new(Vec::new()));
let mut core = Core::new().unwrap();
let process = Process::new("cat", Some(Duration::from_secs(20)), &core);
let mark = Arc::new(Mutex::new(b'c'));
let read_until_stream = poll_fn({
let window = window.clone();
let timeout_reader = process.stdout.clone();
move || -> Poll<Option<u8>, std::io::Error> {
let mut buf = [0; 8];
let poll;
{
let mut timeout_reader = timeout_reader.lock().unwrap();
poll = timeout_reader.poll_read(&mut buf);
}
match poll {
Ok(Async::Ready(0)) => Ok(Async::Ready(None)),
Ok(Async::Ready(x)) => {
{
let mut window = window.lock().unwrap();
println!("W: {:?}", *window);
println!("buf: {:?}", &buf[0..x]);
window.extend(buf[0..x].into_iter().map(|x| *x));
println!("W': {:?}", *window);
if let Some(_) = window.iter().find(|c| **c == *mark.lock().unwrap()) {
Ok(Async::Ready(Some(1)))
} else {
Ok(Async::NotReady)
}
}
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(e),
}
}
});
let _stream_thread = thread::spawn(move || {
for o in read_until_stream.wait() {
println!("{:?}", o);
}
});
match core.run(process.child) {
Ok(_) => {}
Err(e) => {
println!("Child error: {:?}", e);
}
}
Ok(())
}
fn main() {
work().unwrap();
}
This is the complete example project.
If you need more data, you need to call poll_read again until you either find what you were looking for or poll_read returns NotReady.
You might want to avoid looping in one task for too long, so you can build yourself a yield_task function to call instead if poll_read didn't return NotReady; it makes sure your task gets called again ASAP after other pending tasks were run.
To use it, just write return yield_task();.
fn yield_inner() {
use futures::task;
task::current().notify();
}
#[inline(always)]
pub fn yield_task<T, E>() -> Poll<T, E> {
yield_inner();
Ok(Async::NotReady)
}
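For completeness, here's a minimal self-contained sketch of the helper in action with a poll_fn-based stream (futures 0.1, same as the question; the step counter is just a stand-in for the real read loop):
extern crate futures; // 0.1
use futures::{stream::poll_fn, task, Async, Poll, Stream};
fn yield_task<T, E>() -> Poll<T, E> {
    task::current().notify();
    Ok(Async::NotReady)
}
fn main() {
    let mut step = 0;
    let s = poll_fn(move || -> Poll<Option<u32>, ()> {
        step += 1;
        match step {
            // Pretend we read some bytes but haven't found the marker yet:
            // yield instead of looping or returning a bare NotReady.
            1 | 2 => {
                eprintln!("did a slice of work, yielding");
                yield_task()
            }
            // Marker found.
            3 => Ok(Async::Ready(Some(42))),
            // End of stream.
            _ => Ok(Async::Ready(None)),
        }
    });
    // wait() drives the stream on the current thread.
    for item in s.wait() {
        println!("{:?}", item);
    }
}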
Also see futures-rs#354: Handle long-running, always-ready futures fairly.
With the new async/await API, futures::task::current is gone; instead you'll need a std::task::Context reference, which is provided as a parameter to the new std::future::Future::poll trait method.
If you're already manually implementing the std::future::Future trait you can simply insert:
context.waker().wake_by_ref();
return std::task::Poll::Pending;
Or build yourself a Future-implementing type that yields exactly once:
pub struct Yield {
ready: bool,
}
impl core::future::Future for Yield {
type Output = ();
fn poll(self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> core::task::Poll<Self::Output> {
let this = self.get_mut();
if this.ready {
core::task::Poll::Ready(())
} else {
cx.waker().wake_by_ref();
this.ready = true; // ready next round
core::task::Poll::Pending
}
}
}
pub fn yield_task() -> Yield {
Yield { ready: false }
}
And then use it in async code like this:
yield_task().await;
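As an aside, current runtimes ship this as a ready-made utility; for example, tokio 1.x provides tokio::task::yield_now (mentioned here as an option, not something the code above relies on):
#[tokio::main]
async fn main() {
    for i in 0..3 {
        println!("finished chunk {} of work", i);
        // Hand control back to the scheduler before continuing.
        tokio::task::yield_now().await;
    }
}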