I am trying to connect to multiple peers and process a series of asynchronous tasks for each peer. Once any peer completes a task, that task should be short-circuited for all the other peers, even if it is still in progress.
In the following example I have 3 tasks:
use std::cell::RefCell;
use std::sync::Arc;
use std::rc::Rc;
use futures::stream::StreamExt;
use rand::Rng;
#[derive(Clone)]
struct Task {
id: u32,
}
impl Task {
fn process<'a>(&'a self, peer: &'a str) -> impl futures::future::Future<Output = ()> + 'a {
let delay = core::time::Duration::from_secs(rand::thread_rng().gen_range(5..10));
async move {
tokio::time::sleep(delay).await;
println!("{} completed task #{} first", peer, self.id);
}
}
}
async fn process_task(task: &Task, done_with_task: &Rc<RefCell<bool>>, notify_task: &Arc<tokio::sync::Notify>, peer: &str) {
if !*done_with_task.borrow() {
tokio::select! {
    _ = task.process(peer) => {
        done_with_task.replace(true);
        notify_task.notify_waiters();
    }
    _ = notify_task.notified() => {}
}
}
}
#[tokio::main]
async fn main() {
let peers = vec!["peer A", "peer B", "peer C"];
let peers = futures::stream::iter(peers);
let (tx, rx) = tokio::sync::mpsc::channel(100);
let rx = tokio_stream::wrappers::ReceiverStream::new(rx);
let rx = peers.chain(rx);
let tasks = vec![
Task {id: 1},
Task {id: 2},
Task {id: 3},
];
let notify_task_1 = Arc::new(tokio::sync::Notify::new());
let done_with_task_1 = Rc::new(RefCell::new(false));
let notify_task_2 = Arc::new(tokio::sync::Notify::new());
let done_with_task_2 = Rc::new(RefCell::new(false));
let notify_task_3 = Arc::new(tokio::sync::Notify::new());
let done_with_task_3 = Rc::new(RefCell::new(false));
let handle_conn_fut = rx.for_each_concurrent(0, |peer| {
let mut tasks = tasks.clone().into_iter();
let notify_task_1 = notify_task_1.clone();
let notify_task_2 = notify_task_2.clone();
let notify_task_3 = notify_task_3.clone();
let done_with_task_1 = done_with_task_1.clone();
let done_with_task_2 = done_with_task_2.clone();
let done_with_task_3 = done_with_task_3.clone();
async move {
println!("connecting to {}", peer);
// task #1
let current_task = tasks.next().unwrap();
process_task(&current_task, &done_with_task_1, &notify_task_1, peer).await;
println!("task #{} done for {}", current_task.id, peer);
// task #2
let current_task = tasks.next().unwrap();
process_task(&current_task, &done_with_task_2, &notify_task_2, peer).await;
println!("task #{} done for {}", current_task.id, peer);
// task #3
let current_task = tasks.next().unwrap();
process_task(&current_task, &done_with_task_3, &notify_task_3, peer).await;
println!("task #{} done for {}", current_task.id, peer);
}
});
let create_new_conn_fut = async move {
// a new peer connects after 10 seconds
tokio::time::sleep(core::time::Duration::from_secs(10)).await;
tx.send("peer D").await.unwrap()
};
// awaits both futures in parallel
futures::future::join(handle_conn_fut, create_new_conn_fut).await;
}
output:
connecting to peer A
connecting to peer B
connecting to peer C
peer A completed task #1 first
task #1 done for peer A
task #1 done for peer C
task #1 done for peer B
connecting to peer D
task #1 done for peer D
peer B completed task #2 first
task #2 done for peer B
task #2 done for peer D
task #2 done for peer C
task #2 done for peer A
peer B completed task #3 first
task #3 done for peer B
task #3 done for peer A
task #3 done for peer C
task #3 done for peer D
The output is correct, but I am trying to find a more idiomatic way to write this, without explicitly needing to handle each task.
I have thought of storing the tasks in a futures::stream::FuturesOrdered, but I am not sure how to go about this.
With abuse of notation, I would like something like this to work in theory:
let tasks = Rc::new(RefCell::new(tasks)) // ?
let handle_conn_fut = rx.for_each_concurrent(0, |peer| {
async move {
println!("connecting to {}", peer);
while let Some(task) = tasks.borrow().next() {
tokio::select! {
complete = task.process(peer) => {
// mutate tasks
tasks.borrow_mut().pop() // ?
}
state_change = tasks.is_mutated() => {} // ?
}
}
}
});
Solved this by storing the current task in a tokio::sync::watch channel, which makes it possible to await changes to the task with changed(). The backlog of future tasks needs to be stored in a mutable memory location that can be moved into different async blocks, so I used an Rc<RefCell<Vec<Task>>> for this. One could maybe use the Rc<RefCell<Vec<Task>>> directly together with tokio::sync::Notify instead of a channel, but I had some trouble with borrowing and mutating the Rc<RefCell<Vec<Task>>> simultaneously in process_task, as explained here: https://doc.rust-lang.org/stable/std/cell/struct.RefCell.html#panics-4
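As a minimal illustration of that panic (my own sketch, separate from the solution): holding a borrow() across an attempted borrow_mut() on the same RefCell fails at runtime.
use std::cell::RefCell;
use std::rc::Rc;
fn main() {
    let backlog = Rc::new(RefCell::new(vec![1, 2, 3]));
    let first = backlog.borrow(); // immutable borrow still alive...
    // ...so a simultaneous mutable borrow is rejected; calling
    // backlog.borrow_mut() here would panic with a BorrowMutError.
    assert!(backlog.try_borrow_mut().is_err());
    println!("still borrowed: {:?}", *first);
}
The full solution: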
use std::cell::RefCell;
use std::rc::Rc;
use futures::stream::StreamExt;
use rand::Rng;
#[derive(Clone, Debug)]
struct Task {
id: u32,
}
impl Task {
fn process<'a>(&'a self, peer: &'a str) -> impl futures::future::Future<Output = ()> + 'a {
let delay = core::time::Duration::from_secs(rand::thread_rng().gen_range(5..10));
async move {
tokio::time::sleep(delay).await;
println!("{} completed task #{} first", peer, self.id);
}
}
}
async fn process_task(task: &Task, peer: &str, task_backlog: &Rc<RefCell<Vec<Task>>>, tx_task: &tokio::sync::watch::Sender<Task>, rx_task: &mut tokio::sync::watch::Receiver<Task>) {
tokio::select! {
    _ = task.process(peer) => {
        let new_task = task_backlog.borrow_mut().pop().unwrap();
        tx_task.send(new_task).unwrap()
    }
    _ = rx_task.changed() => {}
}
}
#[tokio::main]
async fn main() {
let peers = vec!["peer A", "peer B", "peer C"];
let peers = futures::stream::iter(peers);
let (tx, rx) = tokio::sync::mpsc::channel(100);
let rx = tokio_stream::wrappers::ReceiverStream::new(rx);
let rx = peers.chain(rx);
let task_backlog = Rc::new(RefCell::new(vec![
Task {id: 3},
Task {id: 2},
]));
let (tx_task, rx_task) = tokio::sync::watch::channel(Task { id: 1 });
let tx_task_ref = &tx_task;
let handle_conn_fut = rx.for_each_concurrent(0, |peer| {
let task_backlog = task_backlog.clone();
let mut rx_task = rx_task.clone();
let tx_task = tx_task_ref.clone();
async move {
println!("connecting to {}", peer);
loop {
let current_task = rx_task.borrow_and_update().clone();
process_task(&current_task, peer, &task_backlog, &tx_task, &mut rx_task).await;
println!("task #{} done for {}", current_task.id, peer);
}
}
});
let create_new_conn_fut = async move {
// a new peer connects after 10 seconds
tokio::time::sleep(core::time::Duration::from_secs(10)).await;
tx.send("peer D").await.unwrap()
};
// awaits both futures in parallel
futures::future::join(handle_conn_fut, create_new_conn_fut).await;
}
I'm trying to run multiple invocations of the same script on a single deno MainWorker concurrently, and wait for their results (since the scripts can be async). Conceptually, I want something like the loop in run_worker below.
type Tx = Sender<(String, Sender<String>)>;
type Rx = Receiver<(String, Sender<String>)>;
struct Runner {
worker: MainWorker,
futures: FuturesUnordered<Pin<Box<dyn Future<Output=(String, Result<Global<Value>, Error>)>>>>,
response_futures: FuturesUnordered<Pin<Box<dyn Future<Output=(String, Result<(), SendError<String>>)>>>>,
result_senders: HashMap<String, Sender<String>>,
}
impl Runner {
fn new() ...
async fn run_worker(&mut self, rx: &mut Rx, main_module: ModuleSpecifier, user_module: ModuleSpecifier) {
self.worker.execute_main_module(&main_module).await.unwrap();
self.worker.preload_side_module(&user_module).await.unwrap();
loop {
tokio::select! {
msg = rx.recv() => {
if let Some((id, sender)) = msg {
let global = self.worker.js_runtime.execute_script("test", "mod.entry()").unwrap();
self.result_senders.insert(id, sender);
self.futures.push(Box::pin(async {
let resolved = self.worker.js_runtime.resolve_value(global).await;
return (id, resolved);
}));
}
},
script_result = self.futures.next() => {
if let Some((id, out)) = script_result {
self.response_futures.push(Box::pin(async {
let value = deserialize_value(out.unwrap(), &mut self.worker);
let res = self.result_senders.remove(&id).unwrap().send(value).await;
return (id.clone(), res);
}));
}
},
// also handle response_futures here
else => break,
}
}
}
}
The worker can't be borrowed mutably multiple times, so this won't work. The worker therefore has to go into a RefCell, and I've created a BorrowingFuture:
struct BorrowingFuture {
worker: RefCell<MainWorker>,
global: Global<Value>,
id: String
}
And its poll implementation:
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut Box::pin(self.worker.borrow_mut().js_runtime.resolve_value(self.global.clone()))).poll(cx) {
Poll::Ready(result) => Poll::Ready((self.id.clone(), result)),
Poll::Pending => {
cx.waker().clone().wake_by_ref();
Poll::Pending
}
}
}
So the above
self.futures.push(Box::pin(async {
let resolved = self.worker.js_runtime.resolve_value(global).await;
return (id, resolved);
}));
would become
self.futures.push(Box::pin(BorrowingFuture{worker: self.worker, global: global.clone(), id: id.clone()}));
and this would have to be done for the response_futures above as well.
But I see a few issues with this.
Creating a new future on every poll and then polling that seems wrong, but it does work.
It probably has a performance impact because new objects are created constantly.
The same issue would happen for the response futures, which would call send on each poll, which seems completely wrong.
The waker.wake_by_ref is called on every poll, because there is no way to know when a script result will resolve. This results in the future being polled thousands of times (or more) per second, always creating a new object, which I guess could amount to the same as checking it in a loop.
Note: My current setup doesn't use select!, but an enum as Output from multiple Future implementations, pushed into a single FuturesUnordered, and then matched to handle the correct type (script, send, receive). I used select! here because it's far less verbose and gets the point across.
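To illustrate, here is a generic sketch (my own, not the poster's setup) of that enum-Output pattern with a single FuturesUnordered:
use futures::stream::{FuturesUnordered, StreamExt};
use std::future::Future;
use std::pin::Pin;
// A shared output type lets different kinds of futures live in one collection.
enum Out {
    Script(String),
    Send(String),
}
#[tokio::main]
async fn main() {
    let mut futures: FuturesUnordered<Pin<Box<dyn Future<Output = Out>>>> =
        FuturesUnordered::new();
    futures.push(Box::pin(async { Out::Script("id-1".into()) }));
    futures.push(Box::pin(async { Out::Send("id-2".into()) }));
    // Completions arrive in any order; match on the variant to dispatch.
    while let Some(out) = futures.next().await {
        match out {
            Out::Script(id) => println!("script {} resolved", id),
            Out::Send(id) => println!("send {} completed", id),
        }
    }
}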
Is there a way to do this better/more efficiently? Or is it just not the way MainWorker was meant to be used?
main for completeness:
#[tokio::main]
async fn main() {
let main_module = deno_runtime::deno_core::resolve_url(MAIN_MODULE_SPECIFIER).unwrap();
let user_module = deno_runtime::deno_core::resolve_url(USER_MODULE_SPECIFIER).unwrap();
let (tx, mut rx) = channel(1);
let (result_tx, mut result_rx) = channel(1);
let handle = thread::spawn(move || {
let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();
let mut runner = Runner::new();
runtime.block_on(runner.run_worker(&mut rx, main_module, user_module));
});
tx.send(("test input".to_string(), result_tx)).await.unwrap();
let result = result_rx.recv().await.unwrap();
println!("result from worker {}", result);
handle.join().unwrap();
}
TL;DR I'm trying to have a background thread that is identified by an ID and controlled via that ID through web calls, and the background thread doesn't seem to be getting the message through any of the channel types I've tried.
I've tried both the std channels as well as tokio's, and of those I've tried all but the watch type from tokio. All have the same result, which probably means that I've messed something up somewhere without realizing it, but I can't find the issue:
use std::collections::{
hash_map::Entry::{Occupied, Vacant},
HashMap,
};
use std::sync::Arc;
use tokio::sync::mpsc::{self, UnboundedSender};
use tokio::sync::RwLock;
use tokio::task::JoinHandle;
use uuid::Uuid;
use warp::{http, Filter};
#[derive(Default)]
pub struct Switcher {
pub handle: Option<JoinHandle<bool>>,
pub pipeline_end_tx: Option<UnboundedSender<String>>,
}
impl Switcher {
pub fn set_sender(&mut self, tx: UnboundedSender<String>) {
self.pipeline_end_tx = Some(tx);
}
pub fn set_handle(&mut self, handle: JoinHandle<bool>) {
self.handle = Some(handle);
}
}
const ADDR: [u8; 4] = [0, 0, 0, 0];
const PORT: u16 = 3000;
type RunningPipelines = Arc<RwLock<HashMap<String, Arc<RwLock<Switcher>>>>>;
#[tokio::main]
async fn main() {
let running_pipelines = Arc::new(RwLock::new(HashMap::<String, Arc<RwLock<Switcher>>>::new()));
let session_create = warp::post()
.and(with_pipelines(running_pipelines.clone()))
.and(warp::path("session"))
.then(|pipelines: RunningPipelines| async move {
println!("session requested OK!");
let id = Uuid::new_v4();
let mut switcher = Switcher::default();
let (tx, mut rx) = mpsc::unbounded_channel::<String>();
switcher.set_sender(tx);
let t = tokio::spawn(async move {
println!("Background going...");
//This would be something processing in the background until it received the end signal
match rx.recv().await {
Some(v) => {
println!(
"Got end message:{} YESSSSSS#!##!!!!!!!!!!!!!!!!1111eleven",
v
);
}
None => println!("Error receiving end signal:"),
}
println!("ABORTING HANDLE");
true
});
let ret = HashMap::from([("session_id", id.to_string())]);
switcher.set_handle(t);
{
pipelines
.write()
.await
.insert(id.to_string(), Arc::new(RwLock::new(switcher)));
}
Ok(warp::reply::json(&ret))
});
let session_end = warp::delete()
.and(with_pipelines(running_pipelines.clone()))
.and(warp::path("session"))
.and(warp::query::<HashMap<String, String>>())
.then(
|pipelines: RunningPipelines, p: HashMap<String, String>| async move {
println!("session end requested OK!: {:?}", p);
match p.get("session_id") {
None => Ok(warp::reply::with_status(
"Please specify session to end",
http::StatusCode::BAD_REQUEST,
)),
Some(id) => {
let mut pipe = pipelines.write().await;
match pipe.entry(String::from(id)) {
Occupied(handle) => {
println!("occupied");
let (k, v) = handle.remove_entry();
drop(pipe);
println!("removed from hashmap, key:{}", k);
let s = v.write().await;
if let Some(h) = &s.handle {
if let Some(tx) = &s.pipeline_end_tx {
match tx.send("goodbye".to_string()) {
Ok(res) => {
println!(
"sent end message|{:?}| to fpipeline: {}",
res, id
);
//Added this to try to get it to at least Error on the other side
drop(tx);
},
Err(err) => println!(
"ERROR sending end message to pipeline({}):{}",
id, err
),
};
} else {
println!("no sender channel found for pipeline: {}", id);
};
h.abort();
} else {
println!(
"no luck finding the value in handle in the switcher: {}",
id
);
};
}
Vacant(_) => {
println!("no luck finding the handle in the pipelines: {}", id)
}
};
Ok(warp::reply::with_status("done", http::StatusCode::OK))
}
}
},
);
let routes = session_create
.or(session_end)
.recover(handle_rejection)
.with(warp::cors().allow_any_origin());
println!("starting server...");
warp::serve(routes).run((ADDR, PORT)).await;
}
async fn handle_rejection(
err: warp::Rejection,
) -> Result<impl warp::Reply, std::convert::Infallible> {
Ok(warp::reply::json(&format!("{:?}", err)))
}
fn with_pipelines(
pipelines: RunningPipelines,
) -> impl Filter<Extract = (RunningPipelines,), Error = std::convert::Infallible> + Clone {
warp::any().map(move || pipelines.clone())
}
Dependencies:
[dependencies]
warp = "0.3"
tokio = { version = "1", features = ["full"] }
uuid = { version = "0.8.2", features = ["serde", "v4"] }
Results when I boot up, send a "create" request, and then an "end" request with the received ID:
starting server...
session requested OK!
Background going...
session end requested OK!: {"session_id": "6b984a45-38d8-41dc-bf95-422f75c5a429"}
occupied
removed from hashmap, key:6b984a45-38d8-41dc-bf95-422f75c5a429
sent end message|()| to fpipeline: 6b984a45-38d8-41dc-bf95-422f75c5a429
You'll notice that the background thread starts (and doesn't end) when the "create" request is made, but when the "end" request is made, while everything appears to complete successfully from the request (web) side, the background thread never receives the message. As I've said, I've tried all the different channel types and moved things around to get it into this configuration, i.e. flattened and made as thread-safe as I could, or at least as much as I could think of. I'm greener than I would like in Rust, so any help would be VERY appreciated!
I think that the issue here is that you are sending the message and then immediately aborting the background task:
tx.send("goodbye".to_string());
//...
h.abort();
And the background task does not have time to process the message, as the abort is of higher priority.
What you need is to join the task, not to abort it.
Curiously, tokio task handles do not have a join() method; instead you await the handle itself. But for that you need to own the handle, so first you have to extract it from the Switcher:
let mut s = v.write().await;
//steal the task handle
if let Some(h) = s.handle.take() {
//...
tx.send("goodbye".to_string());
//...
//join the task
h.await.unwrap();
}
Note that joining a task may fail, in case the task was aborted or panicked. I'm just unwrapping (and thus panicking) in the code above, but you may want to do something different.
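For instance, a sketch that inspects the JoinError instead of unwrapping (is_cancelled distinguishes an abort from a panic):
match h.await {
    Ok(finished) => println!("background task returned {}", finished),
    Err(e) if e.is_cancelled() => println!("background task was aborted"),
    Err(e) => println!("background task panicked: {}", e),
}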
Or... you could choose not to wait for the task at all. In tokio, if you drop a task handle, the task is detached: it will keep running and finish when it finishes.
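A sketch of that variant - signal the task, then drop the handle instead of awaiting it:
let mut s = v.write().await;
if let Some(h) = s.handle.take() {
    if let Some(tx) = &s.pipeline_end_tx {
        let _ = tx.send("goodbye".to_string());
    }
    drop(h); // detached: the spawned task keeps running until it finishes
}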
I have code that sends file modification events over a tokio channel. The messages originate in a dedicated thread (I think) and end up in a tokio thread. It works fine if the receiver is in the main tokio task, but if I move it to a spawned task then for some reason rx.next() immediately returns None. Here's the code:
use futures::{
channel::mpsc::{channel, Receiver},
SinkExt, StreamExt,
};
use notify::{Event, RecommendedWatcher, RecursiveMode, Watcher};
use std::path::Path;
fn async_watcher() -> notify::Result<(RecommendedWatcher, Receiver<notify::Result<Event>>)> {
let (mut tx, rx) = channel(1);
let watcher = RecommendedWatcher::new(move |res| {
futures::executor::block_on(async {
tx.send(res).await.unwrap();
})
})?;
Ok((watcher, rx))
}
#[tokio::main]
async fn main() {
let path = std::env::args()
.nth(1)
.expect("Argument 1 needs to be a path");
println!("watching {}", path);
// watch_with_task(&path).await.unwrap();
watch_without_task(&path).await;
}
fn watch_with_task(path: &str) -> tokio::task::JoinHandle<()> {
let (mut watcher, mut rx) = async_watcher().unwrap();
watcher
.watch(Path::new(path), RecursiveMode::NonRecursive)
.unwrap();
tokio::spawn(async move {
eprintln!("Watch task spawned");
while let Some(res) = rx.next().await {
match res {
Ok(event) => eprintln!("File changed: {:?}", event),
Err(e) => eprintln!("Watch error: {:?}", e),
}
}
eprintln!("Watch task finished");
})
}
async fn watch_without_task(path: &str) {
let (mut watcher, mut rx) = async_watcher().unwrap();
watcher
.watch(Path::new(path), RecursiveMode::NonRecursive)
.unwrap();
while let Some(res) = rx.next().await {
match res {
Ok(event) => eprintln!("File changed: {:?}", event),
Err(e) => eprintln!("Watch error: {:?}", e),
}
}
}
Dependencies:
[dependencies]
futures = "0.3"
notify = "5.0.0-pre.13"
tokio = { version = "1.6", features = ["full"] }
With watch_without_task I get this output - it works:
watching /path/to/file
File changed: Event { kind: Modify(Metadata(Any)), paths: ["/path/to/file"], attr:tracker: None, attr:flag: None, attr:info: None, attr:source: None }
File changed: Event { kind: Modify(Data(Content)), paths: ["/path/to/file"], attr:tracker: None, attr:flag: None, attr:info: None, attr:source: None }
With watch_with_task I get this:
watching /path/to/file
Watch task spawned
Watch task finished
It exits immediately. Why?
Oooo actually I was wrong. The issue is that when using the task the watcher gets dropped immediately, whereas without the task it is kept alive by the while loop.
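Based on that, a minimal fix (my sketch): move the watcher into the spawned task so it lives as long as the receive loop.
fn watch_with_task(path: &str) -> tokio::task::JoinHandle<()> {
    let (mut watcher, mut rx) = async_watcher().unwrap();
    watcher
        .watch(Path::new(path), RecursiveMode::NonRecursive)
        .unwrap();
    tokio::spawn(async move {
        // Moving `watcher` into the task keeps it alive until the task ends.
        let _watcher = watcher;
        eprintln!("Watch task spawned");
        while let Some(res) = rx.next().await {
            match res {
                Ok(event) => eprintln!("File changed: {:?}", event),
                Err(e) => eprintln!("Watch error: {:?}", e),
            }
        }
        eprintln!("Watch task finished");
    })
}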
I am solving a problem for the website Exercism in Rust, where I basically try to concurrently count how many times different letters occur in some text. I am doing this by passing hashmaps between threads, and somehow I end up in some kind of infinite loop. I think the issue is in my handling of the receiver, but I really don't know. Please help.
use std::collections::HashMap;
use std::thread;
use std::sync::mpsc;
use std::str;
pub fn frequency(input: &[&str], worker_count: usize) -> HashMap<char, usize> {
// Empty case
if input.is_empty() {
return HashMap::new();
}
// Flatten input, set workload for each thread, create hashmap to catch results
let mut flat_input = input.join("");
let workload = input.len() / worker_count;
let mut final_map: HashMap<char, usize> = HashMap::new();
let (tx, rx) = mpsc::channel();
for _i in 0..worker_count {
let task = flat_input.split_off(flat_input.len() - workload);
let tx_clone = mpsc::Sender::clone(&tx);
// Separate threads ---------------------------------------------
thread::spawn(move || {
let mut partial_map: HashMap<char, usize> = HashMap::new();
for letter in task.chars() {
match partial_map.remove(&letter) {
Some(count) => {
partial_map.insert(letter, count + 1);
},
None => {
partial_map.insert(letter, 1);
}
}
}
tx_clone.send(partial_map).expect("Didn't work fool");
});
// --------------------------------------------------
}
// iterate through the returned hashmaps to update the final map
for received in rx {
for (key, value) in received {
match final_map.remove(&key) {
Some(count) => {
final_map.insert(key, count + value);
},
None => {
final_map.insert(key, value);
}
}
}
}
return final_map;
}
Iterating on the receiver rx will block for new messages while senders exist. The ones you've cloned into the threads will drop out of scope when they're done, but you have the original sender tx still in scope.
You can force tx out of scope by dropping it manually:
for _i in 0..worker_count {
...
}
std::mem::drop(tx); // <--------
for received in rx {
...
}
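Alternatively, since each worker sends exactly one map, a sketch that receives exactly worker_count messages (using the entry API for the merge):
for _ in 0..worker_count {
    let received = rx.recv().unwrap();
    for (key, value) in received {
        *final_map.entry(key).or_insert(0) += value;
    }
}
With a fixed number of receives, the loop ends on its own and the original tx no longer needs an explicit drop.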
I am dabbling in tokio-core and can figure out how to spawn an event loop. However, there are two things I am not sure of - how to gracefully exit the event loop, and how to exit a stream running inside an event loop. For example, consider this simple piece of code which spawns two listeners into the event loop and waits for another thread to indicate an exit condition:
extern crate tokio_core;
extern crate futures;
use tokio_core::reactor::Core;
use futures::sync::mpsc::unbounded;
use tokio_core::net::TcpListener;
use std::net::SocketAddr;
use std::str::FromStr;
use futures::{Stream, Future};
use std::thread;
use std::time::Duration;
use std::sync::mpsc::channel;
fn main() {
let (get_tx, get_rx) = channel();
let j = thread::spawn(move || {
let mut core = Core::new().unwrap();
let (tx, rx) = unbounded();
get_tx.send(tx).unwrap(); // <<<<<<<<<<<<<<< (1)
// Listener-0
{
let l = TcpListener::bind(&SocketAddr::from_str("127.0.0.1:44444").unwrap(),
&core.handle())
.unwrap();
let fe = l.incoming()
.for_each(|(_sock, peer)| {
println!("Accepted from {}", peer);
Ok(())
})
.map_err(|e| println!("----- {:?}", e));
core.handle().spawn(fe);
}
// Listener1
{
let l = TcpListener::bind(&SocketAddr::from_str("127.0.0.1:55555").unwrap(),
&core.handle())
.unwrap();
let fe = l.incoming()
.for_each(|(_sock, peer)| {
println!("Accepted from {}", peer);
Ok(())
})
.map_err(|e| println!("----- {:?}", e));
core.handle().spawn(fe);
}
let work = rx.for_each(|v| {
if v {
// (3) I want to shut down listener-0 above the release the resources
Ok(())
} else {
Err(()) // <<<<<<<<<<<<<<< (2)
}
});
let _ = core.run(work);
println!("Exiting event loop thread");
});
let tx = get_rx.recv().unwrap();
thread::sleep(Duration::from_secs(2));
println!("Want to terminate listener-0"); // <<<<<< (3)
tx.send(true).unwrap();
thread::sleep(Duration::from_secs(2));
println!("Want to exit event loop");
tx.send(false).unwrap();
j.join().unwrap();
}
So, say, after the sleep in the main thread I want a clean exit of the event loop thread. Currently I send something to the event loop to make it exit and thus release the thread.
However, both (1) and (2) feel hacky - I am forcing an error as an exit condition. My questions are:
1) Am I doing it right? If not, what is the correct way to gracefully exit the event loop thread?
2) I don't even know how to do (3) - i.e. indicate a condition externally to shut down listener-0 and free all its resources. How do I achieve this?
The event loop (core) stops when it is no longer being turned (e.g. by run()) or when it is forgotten (drop()ed). There is no synchronous exit. core.run() returns and stops turning the loop when the Future passed to it completes.
A Stream completes by yielding None (marked with (3) in the code below).
When e.g. a TCP connection is closed, the Stream representing it completes, and the other way around.
extern crate tokio_core;
extern crate futures;
use tokio_core::reactor::Core;
use futures::sync::mpsc::unbounded;
use tokio_core::net::TcpListener;
use std::net::SocketAddr;
use std::str::FromStr;
use futures::{Async, Stream, Future, Poll};
use std::thread;
use std::time::Duration;
struct CompletionPact<S, C>
where S: Stream,
C: Stream,
{
stream: S,
completer: C,
}
fn stream_completion_pact<S, C>(s: S, c: C) -> CompletionPact<S, C>
where S: Stream,
C: Stream,
{
CompletionPact {
stream: s,
completer: c,
}
}
impl<S, C> Stream for CompletionPact<S, C>
where S: Stream,
C: Stream,
{
type Item = S::Item;
type Error = S::Error;
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
match self.completer.poll() {
Ok(Async::Ready(None)) |
Err(_) |
Ok(Async::Ready(Some(_))) => {
// We are done, forget us
Ok(Async::Ready(None)) // <<<<<< (3)
},
Ok(Async::NotReady) => {
self.stream.poll()
},
}
}
}
fn main() {
// unbounded() is the equivalent of a Stream made from a channel()
// directly create it in this thread instead of receiving a Sender
let (tx, rx) = unbounded::<()>();
// A second one to cause forgetting the listener
let (l0tx, l0rx) = unbounded::<()>();
let j = thread::spawn(move || {
let mut core = Core::new().unwrap();
// Listener-0
{
let l = TcpListener::bind(
&SocketAddr::from_str("127.0.0.1:44444").unwrap(),
&core.handle())
.unwrap();
// wrap the Stream of incoming connections (which usually doesn't
// complete) into a Stream that completes when the
// other side is drop()ed or sent on
let fe = stream_completion_pact(l.incoming(), l0rx)
.for_each(|(_sock, peer)| {
println!("Accepted from {}", peer);
Ok(())
})
.map_err(|e| println!("----- {:?}", e));
core.handle().spawn(fe);
}
// Listener1
{
let l = TcpListener::bind(
&SocketAddr::from_str("127.0.0.1:55555").unwrap(),
&core.handle())
.unwrap();
let fe = l.incoming()
.for_each(|(_sock, peer)| {
println!("Accepted from {}", peer);
Ok(())
})
.map_err(|e| println!("----- {:?}", e));
core.handle().spawn(fe);
}
let _ = core.run(rx.into_future());
println!("Exiting event loop thread");
});
thread::sleep(Duration::from_secs(2));
println!("Want to terminate listener-0");
// A drop() will result in the rx side Stream being completed,
// which is indicated by Ok(Async::Ready(None)).
// Our wrapper behaves the same when something is received.
// When the event loop encounters a
// Stream that is complete it forgets about it. Which propagates to a
// drop() that close()es the file descriptor, which closes the port if
// nothing else uses it.
l0tx.send(()).unwrap(); // alternatively: drop(l0tx);
// Note that this is async and is only the signal
// that starts the forgetting.
thread::sleep(Duration::from_secs(2));
println!("Want to exit event loop");
// Same concept. The reception or drop() will cause Stream completion.
// A completed Future will cause run() to return.
tx.send(()).unwrap();
j.join().unwrap();
}
I implemented graceful shutdown via a oneshot channel.
The trick was to use a oneshot channel to cancel the TCP listener, combined with a select! over the two futures. Note I'm using tokio 0.2 and futures 0.3 in the example below.
use futures::channel::oneshot;
use futures::{FutureExt, StreamExt};
use std::thread;
use tokio::net::TcpListener;
pub struct ServerHandle {
// This is the thread in which the server will block
thread: thread::JoinHandle<()>,
// This switch can be used to trigger shutdown of the server.
kill_switch: oneshot::Sender<()>,
}
impl ServerHandle {
pub fn stop(self) {
self.kill_switch.send(()).unwrap();
self.thread.join().unwrap();
}
}
pub fn run_server() -> ServerHandle {
let (kill_switch, kill_switch_receiver) = oneshot::channel::<()>();
let thread = thread::spawn(move || {
info!("Server thread begun!!!");
let mut runtime = tokio::runtime::Builder::new()
.basic_scheduler()
.enable_all()
.thread_name("Tokio-server-thread")
.build()
.unwrap();
runtime.block_on(async {
server_prog(kill_switch_receiver).await.unwrap();
});
info!("Server finished!!!");
});
ServerHandle {
thread,
kill_switch,
}
}
async fn server_prog(kill_switch_receiver: oneshot::Receiver<()>) -> std::io::Result<()> {
let addr = "127.0.0.1:12345";
let addr: std::net::SocketAddr = addr.parse().unwrap();
let mut listener = TcpListener::bind(&addr).await?;
let mut kill_switch_receiver = kill_switch_receiver.fuse();
let mut incoming = listener.incoming().fuse();
loop {
futures::select! {
x = kill_switch_receiver => {
break;
},
optional_new_client = incoming.next() => {
if let Some(new_client) = optional_new_client {
let peer_socket = new_client?;
info!("Client connected!");
let peer = process_client(peer_socket, db.clone());
peers.lock().unwrap().push(peer);
} else {
info!("No more incoming connections.");
break;
}
},
};
}
Ok(())
}
Hope this helps others (or future me ;)).
My code lives here:
https://github.com/windelbouwman/lognplot/blob/master/lognplot/src/server/server.rs
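For readers on current tokio (1.x), a rough sketch of the same kill-switch-plus-select pattern with today's APIs (handle_client is a hypothetical stand-in for the connection handling):
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::oneshot;
async fn server_prog(mut kill_switch_receiver: oneshot::Receiver<()>) -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:12345").await?;
    loop {
        tokio::select! {
            // The oneshot receiver completes when the sender fires or is dropped.
            _ = &mut kill_switch_receiver => break,
            accepted = listener.accept() => {
                let (socket, peer) = accepted?;
                println!("Client connected: {}", peer);
                tokio::spawn(handle_client(socket));
            }
        }
    }
    Ok(())
}
async fn handle_client(_socket: TcpStream) {
    // hypothetical per-connection work
}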