Ignore errors in FuturesUnordered - rust

I have the following setup
use futures::{
future,
stream::{self, Stream, FuturesUnordered},
};
use tokio;
fn foo(futures: FuturesUnordered<impl futures::Future<Output = std::io::Result<impl std::fmt::Binary>>>) {}
fn bar(futures: FuturesUnordered<impl futures::Future<Output = impl std::fmt::Binary>>) {}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i) // this line can't be changed
}).collect();
// this is ok
foo(futures);
// this will not compile
bar(futures);
}
playground link
I want to be able to call the bar function with futures. Given that I can't change how futures is initialized, how do I ignore the errors in the stream and only process the elements which are not errors?
There is a similar SO question about this here: How can I remove or otherwise ignore errors when processing a stream?
But that answer uses stream::iter_ok, which I believe has since been deprecated or removed.
I expected the following to work:
use futures::{
future,
stream::{self, Stream, FuturesUnordered},
StreamExt,
};
use tokio;
fn foo(futures: FuturesUnordered<impl futures::Future<Output = std::io::Result<impl std::fmt::Binary>>>) {}
async fn bar(futures: FuturesUnordered<impl futures::Future<Output = impl std::fmt::Binary>>) {
futures.for_each(|n| {
async move {
println!("Success on {:b}", n);
}
}).await
}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i)
}).collect();
let futures = futures
.then(|r| future::ok(iter_ok::<_, ()>(r)))
.flatten();
bar(futures).await;
}
playground link

You can create a stream over the successful values of another stream like so:
use futures::{
stream::{self, Stream, FuturesUnordered},
StreamExt,
};
use tokio;
async fn bar(futures: impl Stream<Item = impl std::fmt::Binary>) {
futures.for_each(|n| {
async move {
println!("Success on {:b}", n);
}
}).await
}
#[tokio::main]
async fn main() {
let futures: FuturesUnordered<_> = (0..10).map(move |i| async move {
let delay = core::time::Duration::from_secs(rand::Rng::gen_range(&mut rand::thread_rng(), 1..3));
tokio::time::sleep(delay).await;
Ok::<i32, std::io::Error>(i)
}).collect();
let futures = futures
.then(|r| async { stream::iter(r.into_iter()) })
.flatten();
bar(futures).await;
}
Note: since the type returned by .then() includes the closure and thus can't be named, we have to change the type of futures in bar().
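For completeness, StreamExt::filter_map can also drop the Err values in one step, producing the same kind of Stream that bar() accepts. A minimal sketch assuming the same futures 0.3 and tokio setup as above (the random delay from the question is omitted for brevity):

use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let futures: FuturesUnordered<_> = (0..10)
        .map(|i| async move { Ok::<i32, std::io::Error>(i) })
        .collect();
    // filter_map drives each future and keeps only the Ok values.
    let oks = futures.filter_map(|r| async move { r.ok() });
    oks.for_each(|n| async move { println!("Success on {:b}", n) })
        .await;
}

As with .then().flatten(), the resulting stream type involves the closure and can't be named, so a consumer still has to accept impl Stream<Item = ...>.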

Related

tokio::select! but for a Vec of futures

I have a Vec of futures which I want to execute concurrently (but not necessarily in parallel). Basically, I'm looking for some kind of select function that is similar to tokio::select! but takes a collection of futures, or, conversely, a function that is similar to futures::join_all but returns once the first future is done.
An additional requirement is that once a future has finished I might want to add a new future to the Vec.
With such a function, my code would roughly look like this:
use std::future::Future;
use std::time::Duration;
use tokio::time::sleep;
async fn wait(millis: u64) -> u64 {
sleep(Duration::from_millis(millis)).await;
millis
}
// This pseudo-implementation simply removes the last
// future and awaits it. I'm looking for something that
// instead polls all futures until one is finished, then
// removes that future from the Vec and returns it.
async fn select<F, O>(futures: &mut Vec<F>) -> O
where
F: Future<Output=O>
{
let future = futures.pop().unwrap();
future.await
}
#[tokio::main]
async fn main() {
let mut futures = vec![
wait(500),
wait(300),
wait(100),
wait(200),
];
while !futures.is_empty() {
let finished = select(&mut futures).await;
println!("Waited {}ms", finished);
if some_condition() {
futures.push(wait(200));
}
}
}
This is exactly what futures::stream::FuturesUnordered is for (which I've found by looking through the source of StreamExt::for_each_concurrent):
use futures::{stream::FuturesUnordered, StreamExt};
use std::time::Duration;
use tokio::time::{sleep, Instant};
async fn wait(millis: u64) -> u64 {
sleep(Duration::from_millis(millis)).await;
millis
}
#[tokio::main]
async fn main() {
let mut futures = FuturesUnordered::new();
futures.push(wait(500));
futures.push(wait(300));
futures.push(wait(100));
futures.push(wait(200));
let start_time = Instant::now();
let mut num_added = 0;
while let Some(wait_time) = futures.next().await {
println!("Waited {}ms", wait_time);
if num_added < 3 {
num_added += 1;
futures.push(wait(200));
}
}
println!("Completed all work in {}ms", start_time.elapsed().as_millis());
}
(playground)
Here's a working prototype based on streams and StreamExt::for_each_concurrent, as Martin Gallagher has suggested in a comment:
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::time::sleep;
use futures::stream::{self, StreamExt};
use futures::{channel::mpsc, sink::SinkExt};
async fn wait(millis: u64) -> u64 {
sleep(Duration::from_millis(millis)).await;
millis
}
#[tokio::main]
async fn main() {
let (mut sink, futures_stream) = mpsc::unbounded();
let start_futures = vec![wait(500), wait(300), wait(100), wait(200)];
let num_futures = RwLock::new(start_futures.len());
sink.send_all(&mut stream::iter(start_futures.into_iter().map(Ok)))
.await
.unwrap();
let sink_lock = RwLock::new(sink);
futures_stream
.for_each_concurrent(None, |fut| async {
let wait_time = fut.await;
println!("Waited {}", wait_time);
if some_condition() {
println!("Adding new future");
let mut sink = sink_lock.write().await;
sink.send(wait(100)).await.unwrap();
} else {
let mut num_futures = num_futures.write().await;
*num_futures -= 1;
}
let num_futures = num_futures.read().await;
if *num_futures <= 0 {
// Close the sink to exit the for_each_concurrent
sink_lock.write().await.close().await.unwrap();
}
})
.await;
}
While this approach works, it has the drawback that we need to maintain a separate counter of remaining futures so that we can close the sink -- there's no Vec of futures for which we can check whether it's empty. Closing the sink requires another lock.
Given that I'm fairly new to Rust I wouldn't be surprised if this approach could be made more elegant.

Calling an async function synchronously with tokio [duplicate]

I am trying to use hyper to grab the content of an HTML page and would like to synchronously return the output of a future. I realized I could have picked a better example since synchronous HTTP requests already exist, but I am more interested in understanding whether we could return a value from an async calculation.
extern crate futures;
extern crate hyper;
extern crate hyper_tls;
extern crate tokio;
use futures::{future, Future, Stream};
use hyper::Client;
use hyper::Uri;
use hyper_tls::HttpsConnector;
use std::str;
fn scrap() -> Result<String, String> {
let scraped_content = future::lazy(|| {
let https = HttpsConnector::new(4).unwrap();
let client = Client::builder().build::<_, hyper::Body>(https);
client
.get("https://hyper.rs".parse::<Uri>().unwrap())
.and_then(|res| {
res.into_body().concat2().and_then(|body| {
let s_body: String = str::from_utf8(&body).unwrap().to_string();
futures::future::ok(s_body)
})
}).map_err(|err| format!("Error scraping web page: {:?}", &err))
});
scraped_content.wait()
}
fn read() {
let scraped_content = future::lazy(|| {
let https = HttpsConnector::new(4).unwrap();
let client = Client::builder().build::<_, hyper::Body>(https);
client
.get("https://hyper.rs".parse::<Uri>().unwrap())
.and_then(|res| {
res.into_body().concat2().and_then(|body| {
let s_body: String = str::from_utf8(&body).unwrap().to_string();
println!("Reading body: {}", s_body);
Ok(())
})
}).map_err(|err| {
println!("Error reading webpage: {:?}", &err);
})
});
tokio::run(scraped_content);
}
fn main() {
read();
let content = scrap();
println!("Content = {:?}", &content);
}
The example compiles and the call to read() succeeds, but the call to scrap() panics with the following error message:
Content = Err("Error scraping web page: Error { kind: Execute, cause: None }")
I understand that I failed to launch the task properly before calling .wait() on the future but I couldn't find how to properly do it, assuming it's even possible.
Standard library futures
Let's use this as our minimal, reproducible example:
async fn example() -> i32 {
42
}
Call executor::block_on:
use futures::executor; // 0.3.1
fn main() {
let v = executor::block_on(example());
println!("{}", v);
}
Tokio
Use the tokio::main attribute on any function (not just main!) to convert it from an asynchronous function to a synchronous one:
use tokio; // 0.3.5
#[tokio::main]
async fn main() {
let v = example().await;
println!("{}", v);
}
tokio::main is a macro that transforms this
#[tokio::main]
async fn main() {}
Into this:
fn main() {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async { {} })
}
This uses Runtime::block_on under the hood, so you can also write this as:
use tokio::runtime::Runtime; // 0.3.5
fn main() {
let v = Runtime::new().unwrap().block_on(example());
println!("{}", v);
}
For tests, you can use tokio::test.
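For instance, a minimal sketch of such a test, reusing the example function from above:

#[tokio::test]
async fn example_returns_42() {
    assert_eq!(example().await, 42);
}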
async-std
Use the async_std::main attribute on the main function to convert it from an asynchronous function to a synchronous one:
use async_std; // 1.6.5, features = ["attributes"]
#[async_std::main]
async fn main() {
let v = example().await;
println!("{}", v);
}
For tests, you can use async_std::test.
Futures 0.1
Let's use this as our minimal, reproducible example:
use futures::{future, Future}; // 0.1.27
fn example() -> impl Future<Item = i32, Error = ()> {
future::ok(42)
}
For simple cases, you only need to call wait:
fn main() {
let s = example().wait();
println!("{:?}", s);
}
However, this comes with a pretty severe warning:
This method is not appropriate to call on event loops or similar I/O situations because it will prevent the event loop from making progress (this blocks the thread). This method should only be called when it's guaranteed that the blocking work associated with this future will be completed by another thread.
Tokio
If you are using Tokio 0.1, you should use Tokio's Runtime::block_on:
use tokio; // 0.1.21
fn main() {
let mut runtime = tokio::runtime::Runtime::new().expect("Unable to create a runtime");
let s = runtime.block_on(example());
println!("{:?}", s);
}
If you peek in the implementation of block_on, it actually sends the future's result down a channel and then calls wait on that channel! This is fine because Tokio guarantees to run the future to completion.
See also:
How can I efficiently extract the first element of a futures::Stream in a blocking manner?
As this is the top result that comes up in search engines for the query "How to call async from sync in Rust", I decided to share my solution here. I think it might be useful.
As #Shepmaster mentioned, back in version 0.1 the futures crate had a beautiful .wait() method that could be used to call an async function from a sync one. This must-have method, however, was removed from later versions of the crate.
Luckily, it's not that hard to re-implement it:
trait Block {
fn wait(self) -> <Self as futures::Future>::Output
where Self: Sized, Self: futures::Future
{
futures::executor::block_on(self)
}
}
impl<F,T> Block for F
where F: futures::Future<Output = T>
{}
After that, you can just do following:
async fn example() -> i32 {
42
}
fn main() {
let s = example().wait();
println!("{:?}", s);
}
Beware that this comes with all the caveats of the original .wait() explained in #Shepmaster's answer.
This works for me using tokio:
tokio::runtime::Runtime::new()?.block_on(fooAsyncFunction())?;
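To make that one-liner concrete, here is a minimal self-contained sketch (foo_async_function is a placeholder for your own async fn, and tokio 1.x is assumed); the first ? handles Runtime::new() failing and the second propagates the async function's own error:

use tokio::runtime::Runtime; // 1.x assumed

async fn foo_async_function() -> std::io::Result<u64> {
    Ok(42)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a runtime, block the current thread until the future completes,
    // then propagate either error with `?`.
    let v = Runtime::new()?.block_on(foo_async_function())?;
    println!("{}", v);
    Ok(())
}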

Is there any way to shutdown `tokio::runtime::current_thread::Runtime`?

I'm using tokio::runtime::current_thread::Runtime and I want to be able to run a future and stop the reactor in the same thread. The example on the page doesn't show how to stop the runtime. Is there any way I can do that?
The runtime will automatically shut down when the future is complete:
use std::time::Duration;
use tokio::time; // 0.2.21
#[tokio::main]
async fn main() {
time::delay_for(Duration::from_secs(2)).await;
eprintln!("future complete");
}
See How do I synchronously return a value calculated in an asynchronous Future in stable Rust? for other ways of creating a runtime.
If you need to cancel a future, you can create something that will cause future polls to succeed. I'd probably use channels and select:
use futures::{channel::oneshot, future, FutureExt}; // 0.3.5
use std::time::Duration;
use tokio::{task, time}; // 0.2.21
#[tokio::main]
async fn main() {
let future = async {
time::delay_for(Duration::from_secs(3600)).await;
eprintln!("future complete");
};
let (cancel_tx, cancel_rx) = oneshot::channel();
let another_task = task::spawn(async {
eprintln!("Another task started");
time::delay_for(Duration::from_secs(2)).await;
eprintln!("Another task canceling the future");
cancel_tx.send(()).expect("Unable to cancel");
eprintln!("Another task exiting");
});
future::select(future.boxed(), cancel_rx).await;
another_task.await.expect("The other task panicked");
}
Here's an alternate manual solution that's very simple, brute force, and probably not-very-performant:
use pin_project::pin_project; // 0.4.17
use std::{
future::Future,
pin::Pin,
sync::{Arc, Mutex},
task::{self, Context, Poll},
thread,
time::Duration,
};
use tokio::time; // 0.2.21
#[tokio::main]
async fn main() {
let future = async {
time::delay_for(Duration::from_secs(3600)).await;
eprintln!("future complete");
};
let (future, cancel) = Cancelable::new(future);
let another_thread = thread::spawn(|| {
eprintln!("Another thread started");
thread::sleep(Duration::from_secs(2));
eprintln!("Another thread canceling the future");
cancel();
eprintln!("Another thread exiting");
});
future.await;
another_thread.join().expect("The other thread panicked");
}
#[pin_project]
#[derive(Debug)]
struct Cancelable<F> {
#[pin]
inner: F,
info: Arc<Mutex<CancelInfo>>,
}
#[derive(Debug, Default)]
struct CancelInfo {
cancelled: bool,
task: Option<task::Waker>,
}
impl<F> Cancelable<F> {
fn new(inner: F) -> (Self, impl FnOnce()) {
let info = Arc::new(Mutex::new(CancelInfo::default()));
let cancel = {
let info = info.clone();
move || {
let mut info = info.lock().unwrap();
info.cancelled = true;
if let Some(waker) = info.task.take() {
waker.wake();
}
}
};
let me = Cancelable { inner, info };
(me, cancel)
}
}
impl<F> Future for Cancelable<F>
where
F: Future<Output = ()>,
{
type Output = ();
fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut info = this.info.lock().unwrap();
if info.cancelled {
Poll::Ready(())
} else {
let r = this.inner.poll(ctx);
if r.is_pending() {
info.task = Some(ctx.waker().clone());
}
r
}
}
}
See also:
When is it safe to move a member value out of a pinned future?
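As an aside (not part of the original answers, and assuming the separate tokio_util crate and a recent tokio are acceptable), tokio_util::sync::CancellationToken packages up essentially the same waker bookkeeping shown above, so a cancellable wait could be sketched like this:

use std::time::Duration;
use tokio::time;
use tokio_util::sync::CancellationToken; // tokio_util 0.7 assumed

#[tokio::main]
async fn main() {
    let token = CancellationToken::new();
    let canceller = token.clone();

    // Something else decides, two seconds from now, to cancel the work.
    tokio::spawn(async move {
        time::sleep(Duration::from_secs(2)).await;
        eprintln!("Cancelling the future");
        canceller.cancel();
    });

    tokio::select! {
        _ = time::sleep(Duration::from_secs(3600)) => eprintln!("future complete"),
        _ = token.cancelled() => eprintln!("future was cancelled"),
    }
}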

Shared mutable state in Hyper

I'm trying to create a counter in a Hyper web server that counts the number of requests it has received. I'm using an Arc<Mutex<u64>> to hold the count. However, I haven't been able to figure out the right combination of move and .clone() to satisfy the types of the closures. Here's some code that compiles, but resets the counter on each request:
extern crate hyper;
use hyper::rt::Future;
use hyper::service::service_fn_ok;
use hyper::{Body, Response, Server};
use std::sync::{Arc, Mutex};
fn main() {
let addr = "0.0.0.0:3000".parse().unwrap();
// FIXME want to create the counter here, not below
let server = Server::bind(&addr)
.serve(|| {
service_fn_ok(|_req| {
let counter = Arc::new(Mutex::new(0));
use_counter(counter)
})
})
.map_err(|e| eprintln!("Error: {}", e));
hyper::rt::run(server)
}
fn use_counter(counter: Arc<Mutex<u64>>) -> Response<Body> {
let mut data = counter.lock().unwrap();
*data += 1;
Response::new(Body::from(format!("Counter: {}\n", data)))
}
It turns out I was pretty close, and looking at a few other examples helped me realize the problem. Since there are two layers of closures at play here, I need to move the counter into the outer closure, clone it, and then move that clone into the inner closure and clone there again. To wit:
extern crate hyper; // 0.12.10
use hyper::rt::Future;
use hyper::service::service_fn_ok;
use hyper::{Body, Response, Server};
use std::sync::{Arc, Mutex};
fn main() {
let addr = "0.0.0.0:3000".parse().unwrap();
let counter = Arc::new(Mutex::new(0));
let server = Server::bind(&addr)
.serve(move || {
let counter = counter.clone();
service_fn_ok(move |_req| use_counter(counter.clone()))
})
.map_err(|e| eprintln!("Error: {}", e));
hyper::rt::run(server)
}
fn use_counter(counter: Arc<Mutex<u64>>) -> Response<Body> {
let mut data = counter.lock().unwrap();
*data += 1;
Response::new(Body::from(format!("Counter: {}\n", data)))
}
Update February 2020: Here's a version using hyper 0.13:
use hyper::{Body, Response, Server, Request};
use std::sync::{Arc, Mutex};
use hyper::service::{make_service_fn, service_fn};
use std::convert::Infallible;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let addr = "0.0.0.0:3000".parse()?;
let counter = Arc::new(Mutex::new(0));
let make_service = make_service_fn(move |_conn| {
let counter = counter.clone();
async move {
Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| {
let counter = counter.clone();
async move {
Ok::<_, Infallible>(use_counter(counter))
}
}))
}
});
Server::bind(&addr).serve(make_service).await?;
Ok(())
}
fn use_counter(counter: Arc<Mutex<u64>>) -> Response<Body> {
let mut data = counter.lock().unwrap();
*data += 1;
Response::new(Body::from(format!("Counter: {}\n", data)))
}
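Since the shared state here is just a counter, one further simplification (a sketch, not part of the original answer) is to swap the Mutex<u64> for an AtomicU64; the clone-into-each-closure dance stays exactly the same, but the handler no longer has to lock. The counting part in isolation, independent of hyper:

use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

// fetch_add atomically increments the counter and returns the previous value,
// so no lock is needed around the read-modify-write.
fn use_counter(counter: &AtomicU64) -> String {
    let previous = counter.fetch_add(1, Ordering::SeqCst);
    format!("Counter: {}\n", previous + 1)
}

fn main() {
    let counter = Arc::new(AtomicU64::new(0));
    for _ in 0..3 {
        print!("{}", use_counter(&counter));
    }
}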

How can I chain two futures on the same resource without having to define every single method combination ahead of time?

I am writing the code to bootstrap and connect to a 2G/3G network using a SIM800L modem. This modem is interfaced with a single serial channel, which I've muxed outside of this project into 4 channels (data, text interface, control interface, status messages).
In order to bootstrap this, I need to run a series of sequential commands. This sequence changes based on the output of the modem (is the SIM locked? What kind of info does the SIM need to be unlocked? What kind of APN are we getting on? What kind of network selection do we want?). I initially thought that this would be a perfect application for futures as each individual operation can be very costly in terms of time spent idling (AT+COPS, one of the commands, takes up to 10s to return).
I've arrived at something like this, which compiles and seems to execute the commands sequentially, but the third operation comes out empty. My question is twofold: why don't the commands that were run show up in the result of the last future, and is there a more robust way of doing something like this?
#![feature(conservative_impl_trait)]
extern crate futures;
extern crate tokio_core;
use std::sync::{Arc, Mutex};
use futures::{future, Future};
use tokio_core::reactor::Core;
use futures::sync::oneshot;
use std::thread;
use std::io;
use std::time::Duration;
pub struct Channel {
operations: Arc<Mutex<Vec<String>>>,
}
impl Channel {
pub fn ops(&mut self) -> Box<Future<Item = Vec<String>, Error = io::Error>> {
println!("{:?}", self.operations);
let ops = Arc::clone(&self.operations);
let ops = ops.lock().unwrap();
future::ok::<Vec<String>, io::Error>(ops.to_vec()).boxed()
}
pub fn run(&mut self, command: &str) -> Box<Future<Item = Vec<String>, Error = io::Error>> {
let (tx, rx) = oneshot::channel::<Vec<String>>();
let ops = Arc::clone(&self.operations);
let str_cmd = String::from(command);
thread::spawn(move || {
thread::sleep(Duration::new(0, 10000));
let mut ops = ops.lock().unwrap();
ops.push(str_cmd.clone());
println!("Pushing op: {}", str_cmd.clone());
tx.send(vec!["OK".to_string()])
});
rx.map_err(|_| io::Error::new(io::ErrorKind::NotFound, "Test"))
.boxed()
}
}
pub struct Channels {
inner_object: Arc<Mutex<Channel>>,
}
impl Channels {
pub fn one(&self, cmd: &str) -> Box<Future<Item = Vec<String>, Error = io::Error>> {
let v = Arc::clone(&self.inner_object);
let mut v = v.lock().unwrap();
v.run(&cmd)
}
pub fn ops(&self) -> Box<Future<Item = Vec<String>, Error = io::Error>> {
let v = Arc::clone(&self.inner_object);
let mut v = v.lock().unwrap();
v.ops()
}
pub fn run_command(&self) -> Box<Future<Item = (), Error = io::Error>> {
let a = self.one("AT+CMEE=2");
let b = self.one("AT+CREG=0");
let c = self.ops();
Box::new(a.and_then(|result_1| {
assert_eq!(result_1, vec![String::from("OK")]);
b.and_then(|result_2| {
assert_eq!(result_2, vec![String::from("OK")]);
c.map(move |ops| {
assert_eq!(
ops.as_slice(),
["AT+CMEE=2".to_string(), "AT+CREG=0".to_string()]
);
})
})
}))
}
}
fn main() {
let mut core = Core::new().expect("Core should be created");
let channels = Channels {
inner_object: Arc::new(Mutex::new(Channel {
operations: Arc::new(Mutex::new(vec![])),
})),
};
let result = core.run(channels.run_command()).expect("Should've worked");
println!("{:?}", result);
}
playground
why don't the commands that were run show up in the result of the last future
Because you haven't sequenced the operations to occur in that way:
let a = self.one("AT+CMEE=2");
let b = self.one("AT+CREG=0");
let c = self.ops();
This immediately builds:
a, b — promises that sleep a while before they respond
c — a promise that gets the ops in the vector
At the point in time that c is created, the sleeps have yet to terminate, so there have been no operations performed, so the vector will be empty.
Future::and_then is intended to be used to define sequential operations. This is complicated in your case as you want to use self in the body of the and_then closure. You can clone the Arc<Channel> and use that instead.
You'll note that I've made a number of simplifications:
Returning a String instead of Vec<String>
Removing unused mut qualifiers and a Mutex
Returning the operations Vec directly.
extern crate futures;
extern crate tokio_core;
use std::sync::{Arc, Mutex};
use futures::Future;
use tokio_core::reactor::Core;
use futures::sync::oneshot;
use std::thread;
use std::io;
use std::time::Duration;
pub struct Channel {
operations: Arc<Mutex<Vec<String>>>,
}
impl Channel {
fn ops(&self) -> Vec<String> {
self.operations.lock().unwrap().clone()
}
fn command(&self, command: &str) -> Box<Future<Item = String, Error = io::Error>> {
let (tx, rx) = oneshot::channel();
let ops = Arc::clone(&self.operations);
let str_cmd = String::from(command);
thread::spawn(move || {
thread::sleep(Duration::new(0, 10000));
println!("Pushing op: {}", str_cmd);
ops.lock().unwrap().push(str_cmd);
tx.send("OK".to_string())
});
Box::new(rx.map_err(|_| io::Error::new(io::ErrorKind::NotFound, "Test")))
}
}
struct Channels {
data: Arc<Channel>,
}
impl Channels {
fn run_command(&self) -> Box<Future<Item = (), Error = io::Error>> {
let d2 = Arc::clone(&self.data);
let d3 = Arc::clone(&self.data);
Box::new(
self.data
.command("AT+CMEE=2")
.and_then(move |cmee_answer| {
assert_eq!(cmee_answer, "OK"); // This should be checked in `command` and be a specific Error
d2.command("AT+CREG=0")
})
.map(move |creg_answer| {
assert_eq!(creg_answer, "OK"); // This should be checked in `command` and be a specific Error
let ops = d3.ops();
assert_eq!(ops, ["AT+CMEE=2", "AT+CREG=0"])
}),
)
}
}
fn main() {
let mut core = Core::new().expect("Core should be created");
let channels = Channels {
data: Arc::new(Channel {
operations: Arc::new(Mutex::new(vec![])),
}),
};
let result = core.run(channels.run_command()).expect("Should've worked");
println!("{:?}", result);
}
However, this isn't the type of code I usually see with futures. Instead of taking &self, many futures take self. Let's see how that would look:
extern crate futures;
extern crate tokio_core;
use std::sync::{Arc, Mutex};
use futures::Future;
use tokio_core::reactor::Core;
use futures::sync::oneshot;
use std::thread;
use std::io;
use std::time::Duration;
#[derive(Clone)]
pub struct Channel {
operations: Arc<Mutex<Vec<String>>>,
}
impl Channel {
fn ops(&self) -> Arc<Mutex<Vec<String>>> {
Arc::clone(&self.operations)
}
fn command(self, command: &str) -> Box<Future<Item = (Self, String), Error = io::Error>> {
let (tx, rx) = oneshot::channel();
let str_cmd = String::from(command);
thread::spawn(move || {
thread::sleep(Duration::new(0, 10000));
println!("Pushing op: {}", str_cmd);
self.operations.lock().unwrap().push(str_cmd);
tx.send((self, "OK".to_string()))
});
Box::new(rx.map_err(|_| io::Error::new(io::ErrorKind::NotFound, "Test")))
}
}
struct Channels {
data: Channel,
}
impl Channels {
fn run_command(self) -> Box<Future<Item = (), Error = io::Error>> {
Box::new(
self.data
.clone()
.command("AT+CMEE=2")
.and_then(|(channel, cmee_answer)| {
assert_eq!(cmee_answer, "OK");
channel.command("AT+CREG=0")
})
.map(|(channel, creg_answer)| {
assert_eq!(creg_answer, "OK");
let ops = channel.ops();
let ops = ops.lock().unwrap();
assert_eq!(*ops, ["AT+CMEE=2", "AT+CREG=0"]);
}),
)
}
}
fn main() {
let mut core = Core::new().expect("Core should be created");
let channels = Channels {
data: Channel {
operations: Arc::new(Mutex::new(vec![])),
},
};
let result = core.run(channels.run_command()).expect("Should've worked");
println!("{:?}", result);
}
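As a closing aside (not part of the original answer): with today's async/await, the same sequencing on a shared resource needs none of this plumbing, because a method taking &self can simply be awaited twice in a row. A rough sketch under that assumption, using tokio in place of tokio_core:

use std::io;
use std::sync::Mutex;
use std::time::Duration;

pub struct Channel {
    operations: Mutex<Vec<String>>,
}

impl Channel {
    async fn command(&self, command: &str) -> io::Result<String> {
        // Simulate the modem taking a moment to answer.
        tokio::time::sleep(Duration::from_millis(10)).await;
        self.operations.lock().unwrap().push(command.to_string());
        Ok("OK".to_string())
    }

    fn ops(&self) -> Vec<String> {
        self.operations.lock().unwrap().clone()
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let channel = Channel { operations: Mutex::new(vec![]) };
    assert_eq!(channel.command("AT+CMEE=2").await?, "OK");
    assert_eq!(channel.command("AT+CREG=0").await?, "OK");
    assert_eq!(channel.ops(), ["AT+CMEE=2", "AT+CREG=0"]);
    Ok(())
}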
