I'm new to Rust and have implemented a network receiver thread. I'm testing my implementation by sending data to the same socket address the receiver socket is bound to (using dynamic binding, i.e. binding the socket to port 0). But I'm getting:
thread 'google' panicked at 'Didn't receive data: Os { code: 10054, kind: ConnectionReset, message: "An existing connection was forcibly closed by the remote host." }', src\ethernet_interface.rs:62:42
This is my code:
src/main.rs
mod ethernet_interface;
mod WWW_sim_interface;
use crate::WWW_sim_interface::WWWSimInterface;
use std::{thread, time};
fn main() {
let www_sim_interface = WWWSimInterface::new(String::from("google"));
let mut www_sim_interface_runnable = www_sim_interface.start();
let two_seconds = time::Duration::from_secs(2);
thread::sleep(two_seconds);
www_sim_interface_runnable.terminate_WWW_sim();
www_sim_interface_runnable.join_handle.join();
}
src/WWW_sim_interface.rs
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::thread;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::mem;
use crate::ethernet_interface::EthernetInterface;
pub struct WWWSimInterface {
target_ID: String,
terminate_flag: AtomicBool,
eth_interface: Option<EthernetInterface>,
}
pub struct RunningWWWSimInterface {
pub join_handle: thread::JoinHandle<()>,
WWW_sim_interface: Arc<WWWSimInterface>,
}
impl WWWSimInterface {
pub fn new(target_ID: String) -> WWWSimInterface {
let mut WWW_sim_interface = WWWSimInterface {
target_ID: target_ID,
terminate_flag: AtomicBool::new(false),
eth_interface: Some(EthernetInterface::new()),
};
WWW_sim_interface.eth_interface.as_mut().expect("Error").setup_receiver(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0));
WWW_sim_interface
}
pub fn start(self) -> RunningWWWSimInterface {
let WWW_sim_interface = Arc::new(self);
let join_handle = {
let WWW_sim_interface = WWW_sim_interface.clone();
thread::Builder::new().name(WWW_sim_interface.target_ID.clone()).spawn(move || WWW_sim_interface.run()).ok().unwrap()
};
RunningWWWSimInterface {
join_handle,
WWW_sim_interface,
}
}
pub fn run(&self) {
let mut buff: [u8; 2048] = [0; 2048];
let mut msg_ID: u16;
println!("started receiver thread");
while !self.terminate_flag.load(Ordering::Relaxed) {
let data_len = self.eth_interface.as_ref().expect("Error").recv_data(&mut buff);
if data_len < 2 {
continue;
}
let vec_buff = buff[..data_len].to_vec();
let (int_bytes, rest) = buff.split_at(mem::size_of::<u16>());
msg_ID = u16::from_be_bytes(int_bytes.try_into().unwrap());
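// msg_ID 1234 is the shutdown sentinel that terminate_WWW_sim() sends via send_self() to unblock the blocking recv.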
if msg_ID == 1234 {
break;
}
}
}
}
impl RunningWWWSimInterface {
pub fn terminate_WWW_sim(&mut self) {
self.WWW_sim_interface.terminate_flag.store(true, Ordering::Relaxed);
let msg_ID: u16 = 1234;
// self.WWW_sim_interface.eth_interface.as_ref().expect("Error").send_data(&msg_ID.to_be_bytes());
self.WWW_sim_interface.eth_interface.as_ref().expect("Error").send_self(&msg_ID.to_be_bytes());
}
}
src/ethernet_interface.rs
use std::net::{UdpSocket, SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr};
use std::net::IpAddr::V4;
use std::net::IpAddr::V6;
pub struct EthernetInterface {
socket: Option<UdpSocket>,
recv_IP_sock_addr: Option<SocketAddr>,
send_IP_sock_addr: Option<SocketAddr>,
}
impl EthernetInterface {
pub fn new() -> EthernetInterface {
EthernetInterface {
socket: None,
recv_IP_sock_addr: None,
send_IP_sock_addr: None,
}
}
pub fn setup_receiver(&mut self, mut recv_IP_sock_addr: SocketAddr) {
let ip_addr = recv_IP_sock_addr.ip();
let first_octet: u8 = match ip_addr {
V4(ip4_addr) => ip4_addr.octets().to_vec()[0],
V6(ip6_addr) => ip6_addr.octets().to_vec()[0]
};
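// 224 through 239 is the IPv4 multicast first-octet range (224.0.0.0/4).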
if first_octet > 223 && first_octet < 240 {
match ip_addr {
V4(ip4_addr) => recv_IP_sock_addr.set_ip(IpAddr::V4(Ipv4Addr::UNSPECIFIED)),
V6(ip6_addr) => recv_IP_sock_addr.set_ip(IpAddr::V6(Ipv6Addr::UNSPECIFIED))
};
}
self.recv_IP_sock_addr = Some(recv_IP_sock_addr);
self.socket = Some(UdpSocket::bind(self.recv_IP_sock_addr.unwrap()).unwrap());
if first_octet > 223 && first_octet < 240 {
let ip_addr = self.recv_IP_sock_addr.unwrap().ip();
match ip_addr {
V4(ip4_addr) => self.socket.as_ref().unwrap().join_multicast_v4(&ip4_addr, &Ipv4Addr::UNSPECIFIED).ok(),
V6(ip6_addr) => self.socket.as_ref().unwrap().join_multicast_v6(&ip6_addr, 0).ok()
};
}
}
pub fn setup_sender(&mut self, send_IP_sock_addr: SocketAddr) {
let ip_addr = send_IP_sock_addr.ip();
let first_octet = match ip_addr {
V4(ip4_addr) => ip4_addr.octets().to_vec()[0],
V6(ip6_addr) => ip6_addr.octets().to_vec()[0]
};
self.send_IP_sock_addr = Some(send_IP_sock_addr);
if first_octet > 223 && first_octet < 240 {
self.socket.as_ref().unwrap().set_multicast_loop_v4(false).expect("set_multicast_loop_v4 call failed");
self.socket.as_ref().unwrap().set_multicast_ttl_v4(8).expect("set_multicast_ttl_v4 call failed");
}
}
pub fn recv_data(&self, buff: &mut [u8]) -> usize {
let (number_of_bytes, src_addr) = self.socket.as_ref().unwrap().recv_from(buff)
.expect("Didn't receive data");
println!("recvd data");
number_of_bytes
}
pub fn send_data(&self, buff: &[u8]) {
self.socket.as_ref().unwrap().send_to(buff, self.send_IP_sock_addr.unwrap()).expect("couldn't send data");
}
pub fn get_host_bound_port(&self) -> u16 {
self.socket.as_ref().unwrap().local_addr().unwrap().port()
}
pub fn get_src_addr(&mut self) {
let mut buff: [u8; 2048] = [0; 2048];
let (_, src_addr) = self.socket.as_ref().unwrap().recv_from(&mut buff)
.expect("Didn't receive data");
self.send_IP_sock_addr = Some(src_addr);
}
pub fn send_self(&self, buff: &[u8]) {
self.socket.as_ref().unwrap().send_to(buff, self.recv_IP_sock_addr.unwrap()).expect("couldn't send self data");
println!("sent data");
}
}
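For reference, the loopback pattern the question describes (bind to 127.0.0.1 with port 0, then send to the same socket from within the same process) can be exercised on its own with just the standard library. This is only an illustrative sketch, not the code above; note that local_addr() is what reports the port the OS actually picked:
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind to port 0 so the OS picks a free port.
    let receiver = UdpSocket::bind("127.0.0.1:0")?;
    // local_addr() returns the address the socket is really bound to,
    // including the kernel-assigned port (the requested address still says port 0).
    let bound_addr = receiver.local_addr()?;

    let sender = UdpSocket::bind("127.0.0.1:0")?;
    sender.send_to(&1234u16.to_be_bytes(), bound_addr)?;

    let mut buf = [0u8; 2048];
    let (len, from) = receiver.recv_from(&mut buf)?;
    println!("received {} bytes from {}", len, from);
    Ok(())
}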
Related
I found this script in the post Recommended way of IPC in Rust, where a server and a client communicate over named pipes.
I want to understand how it works, so I started debugging. When I start the server with cargo run listen, the program reaches the open function and blocks there. I know this is a feature and not a bug, but I do not understand why it happens.
In the main function, listen is called, and listen then calls open:
use libc::{c_char, mkfifo};
use serde::{Deserialize, Serialize};
use std::env::args;
use std::fs::{File, OpenOptions};
use std::io::{Error, Read, Result, Write};
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
fn main() -> Result<()> {
let mut args = args();
let _ = args.next();
match args.next().as_ref().map(String::as_str) {
Some("listen") => listen()?,
Some("send") => {
let msg = args.next().unwrap();
send(msg)?;
}
_ => {
eprintln!("Please either listen or send.");
}
}
Ok(())
}
pub struct Fifo {
path: PathBuf,
}
impl Fifo {
pub fn new(path: PathBuf) -> Result<Self> {
let os_str = path.clone().into_os_string();
let slice = os_str.as_bytes();
let mut bytes = Vec::with_capacity(slice.len() + 1);
bytes.extend_from_slice(slice);
bytes.push(0); // zero terminated string
let _ = std::fs::remove_file(&path);
if unsafe { mkfifo((&bytes[0]) as *const u8 as *const c_char, 0o644) } != 0 {
Err(Error::last_os_error())
} else {
Ok(Fifo { path })
}
}
/// Blocks until anyone connects to this fifo.
pub fn open(&self) -> Result<FifoHandle> {
let mut pipe = OpenOptions::new().read(true).open(&self.path)?;
let mut pid_bytes = [0u8; 4];
pipe.read_exact(&mut pid_bytes)?;
let pid = u32::from_ne_bytes(pid_bytes);
drop(pipe);
let read = OpenOptions::new()
.read(true)
.open(format!("/tmp/rust-fifo-read.{}", pid))?;
let write = OpenOptions::new()
.write(true)
.open(format!("/tmp/rust-fifo-write.{}", pid))?;
Ok(FifoHandle { read, write })
}
}
impl Drop for Fifo {
fn drop(&mut self) {
let _ = std::fs::remove_file(&self.path);
}
}
#[derive(Serialize, Deserialize)]
pub enum Message {
Print(String),
Ack(),
}
pub struct FifoHandle {
read: File,
write: File,
}
impl FifoHandle {
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
let pid = std::process::id();
let read_fifo_path = format!("/tmp/rust-fifo-write.{}", pid);
let read_fifo = Fifo::new(read_fifo_path.into())?;
let write_fifo_path = format!("/tmp/rust-fifo-read.{}", pid);
let write_fifo = Fifo::new(write_fifo_path.into())?;
let mut pipe = OpenOptions::new().write(true).open(path.as_ref())?;
let pid_bytes: [u8; 4] = u32::to_ne_bytes(pid);
pipe.write_all(&pid_bytes)?;
pipe.flush()?;
let write = OpenOptions::new().write(true).open(&write_fifo.path)?;
let read = OpenOptions::new().read(true).open(&read_fifo.path)?;
Ok(Self { read, write })
}
pub fn send_message(&mut self, msg: &Message) -> Result<()> {
let msg = bincode::serialize(msg).expect("Serialization failed");
self.write.write_all(&usize::to_ne_bytes(msg.len()))?;
self.write.write_all(&msg[..])?;
self.write.flush()
}
pub fn recv_message(&mut self) -> Result<Message> {
let mut len_bytes = [0u8; std::mem::size_of::<usize>()];
self.read.read_exact(&mut len_bytes)?;
let len = usize::from_ne_bytes(len_bytes);
let mut buf = vec![0; len];
self.read.read_exact(&mut buf[..])?;
Ok(bincode::deserialize(&buf[..]).expect("Deserialization failed"))
}
}
fn listen() -> Result<()> {
let fifo = Fifo::new(PathBuf::from("/tmp/rust-fifo"))?;
loop {
let mut handle = fifo.open()?;
std::thread::spawn(move || {
match handle.recv_message().expect("Failed to receive message") {
Message::Print(p) => println!("{}", p),
Message::Ack() => panic!("Didn't expect Ack now."),
}
#[allow(deprecated)]
std::thread::sleep_ms(1000);
handle
.send_message(&Message::Ack())
.expect("Send message failed.");
});
}
}
fn send(s: String) -> Result<()> {
let mut handle = FifoHandle::open("/tmp/rust-fifo")?;
#[allow(deprecated)]
std::thread::sleep_ms(1000);
handle.send_message(&Message::Print(s))?;
match handle.recv_message()? {
Message::Print(p) => println!("{}", p),
Message::Ack() => {}
}
Ok(())
}
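As a side note on the doc comment "Blocks until anyone connects to this fifo": opening a FIFO read-only blocks until some process opens it for writing (and the other way around). A minimal sketch of that behaviour, assuming the libc crate is available; the path and names here are illustrative:
use std::ffi::CString;
use std::fs::OpenOptions;
use std::io::{Read, Write};
use std::thread;
use std::time::Duration;

fn main() -> std::io::Result<()> {
    let path = "/tmp/demo-fifo";
    let _ = std::fs::remove_file(path);
    let c_path = CString::new(path).unwrap();
    // Create the FIFO node with mode 0o644.
    if unsafe { libc::mkfifo(c_path.as_ptr(), 0o644) } != 0 {
        return Err(std::io::Error::last_os_error());
    }

    let writer = thread::spawn(move || {
        // Give the reader time to block inside open().
        thread::sleep(Duration::from_millis(500));
        let mut w = OpenOptions::new().write(true).open(path).unwrap();
        w.write_all(b"hello").unwrap();
    });

    // This open() does not return until the writer thread opens the FIFO.
    let mut r = OpenOptions::new().read(true).open(path)?;
    let mut buf = String::new();
    r.read_to_string(&mut buf)?;
    println!("got: {}", buf);
    writer.join().unwrap();
    let _ = std::fs::remove_file(path);
    Ok(())
}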
I am writing a derive macro for a struct, implementing a method based on each field's type, e.g. u8, an array, or str.
Let's say I have this enum represented as a u32:
#[repr(u32)]
#[derive(Debug, Clone, Copy)]
pub enum ServerGreetingMode {
Unavailable = 0,
Unauthenticated = 1,
Authenticated = 2,
Encrypted = 4,
}
And I am applying the ToBytes derive macro to a struct whose fields include that enum type:
#[repr(packed)]
#[derive(ToBytes, Debug, Clone, Copy)]
pub struct ServerGreetingFrame {
pub unused: [u8; 12],
pub mode: ServerGreetingMode,
pub challenge: [u8; 16],
pub salt: [u8; 16],
pub count: u32,
pub mbz: [u8; 12],
}
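For context, because ServerGreetingMode is #[repr(u32)], a value of that type converts to its discriminant with a plain as u32 cast, the same cast the u32 branch of the derive below emits for the count field. A quick check, independent of the macro (it assumes the enum definition above is in scope):
fn main() {
    let mode = ServerGreetingMode::Encrypted;
    // A field-less #[repr(u32)] enum casts directly to its discriminant.
    assert_eq!(mode as u32, 4);
    assert_eq!((mode as u32).to_be_bytes(), [0, 0, 0, 4]);
}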
I have got to the point where I can see that the field's type is ServerGreetingMode, but I am unable to tell whether that type is an enum or not.
Here is my current implementation:
#[proc_macro_derive(ToBytes)]
pub fn derive(tokens: TokenStream) -> TokenStream {
let tokens_item = tokens.clone();
let items = syn::parse_macro_input!(tokens_item as syn::Item);
let output = match items {
syn::Item::Struct(item) => {
let name = &item.ident;
let statements = match &item.fields {
syn::Fields::Named(ref fields) => {
// eprint!("{:#?}", field);
let vary_by_type = fields.named.iter().map(|field| {
let field_name = &field.ident;
let field_type = &field.ty;
let statement = match field_type {
syn::Type::Array(syn::TypeArray { elem, .. }) => {
let ty = elem.as_ref();
match ty {
syn::Type::Path(typepath)
if typepath.qself.is_none()
&& typepath.path.leading_colon.is_none()
&& typepath.path.segments.len() == 1 && typepath.path.is_ident("u8") =>
{
quote! {
bytes.extend_from_slice(&self.#field_name);
}
},
_ => todo!(),
}
}
syn::Type::Path(ty) if ty.path.clone().is_ident("u32") => {
quote! {
bytes.extend_from_slice(&(self.#field_name as u32).to_be_bytes().to_vec());
}
},
_ => todo!(),
};
statement
});
vary_by_type
}
_ => todo!(),
};
quote! {
impl #name {
fn to_bytes(&self) -> Vec<u8> {
let mut bytes: Vec<u8> = Vec::new();
#(
#statements
)*
bytes
}
}
}
}
_ => todo!(),
};
output.into()
// let s = syn::parse_macro_input!(tokens_struct as syn::ItemStruct);
// let n = &s.ident;
// let expanded = quote! {
// impl #n {
// fn to_bytes(&self) -> Vec<u8> {
// let mut bytes: Vec<u8> = Vec::new();
// bytes
// }
// }
// };
// expanded.into()
}
Thanks.
In the __enter__ method I want to return an object that is accessible from both Rust and Python, so that Rust can update values in the object and Python can read the updated values.
I would like to have something like this:
#![feature(specialization)]
use std::thread;
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny, PyDict};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
use pyo3::wrap_pyfunction;
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
fn counter(
root_path: &str,
statistics: &mut Statistics,
) {
statistics.files += 1;
statistics.errors.push(String::from("Foo"));
}
#[pyfunction]
pub fn count(
py: Python,
root_path: &str,
) -> PyResult<PyObject> {
let mut statistics = Statistics {
files: 0,
errors: Vec::new(),
};
let rc: std::result::Result<(), std::io::Error> = py.allow_threads(|| {
counter(root_path, &mut statistics);
Ok(())
});
let pyresult = PyDict::new(py);
match rc {
Err(e) => { pyresult.set_item("error", e.to_string()).unwrap();
return Ok(pyresult.into())
},
_ => ()
}
pyresult.set_item("files", statistics.files).unwrap();
pyresult.set_item("errors", statistics.errors).unwrap();
Ok(pyresult.into())
}
#[pyclass]
#[derive(Debug)]
pub struct Count {
root_path: String,
exit_called: bool,
thr: Option<thread::JoinHandle<()>>,
statistics: Statistics,
}
#[pymethods]
impl Count {
#[new]
fn __new__(
obj: &PyRawObject,
root_path: &str,
) {
obj.init(Count {
root_path: String::from(root_path),
exit_called: false,
thr: None,
statistics: Statistics {
files: 0,
errors: Vec::new(),
},
});
}
#[getter]
fn statistics(&self) -> PyResult<Statistics> {
Ok(Statistics { files: self.statistics.files,
errors: self.statistics.errors.to_vec(), })
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&mut self) -> PyResult<Py<Count>> {
let gil = GILGuard::acquire();
self.thr = Some(thread::spawn(|| {
counter(self.root_path.as_ref(), &mut self.statistics)
}));
Ok(PyRefMut::new(gil.python(), *self).unwrap().into())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
self.thr.unwrap().join();
let gil = GILGuard::acquire();
self.exit_called = true;
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
m.add_wrapped(wrap_pyfunction!(count))?;
Ok(())
}
But I'm getting the following error:
error[E0477]: the type `[closure#src/lib.rs:90:39: 92:10 self:&mut &'p mut Count]` does not fulfill the required lifetime
--> src/lib.rs:90:25
|
90 | self.thr = Some(thread::spawn(|| {
| ^^^^^^^^^^^^^
|
= note: type must satisfy the static lifetime
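The note points at the root cause: thread::spawn requires a 'static closure, so the closure cannot hold a borrow of &mut self or of its fields. The usual way out, and what the solution below does, is to move owned shared handles (Arc<Mutex<...>>) into the thread. A stripped-down sketch outside PyO3, with illustrative names:
use std::sync::{Arc, Mutex};
use std::thread;

struct Count {
    files: Arc<Mutex<u32>>,
}

impl Count {
    fn start(&mut self) -> thread::JoinHandle<()> {
        // Cloning the Arc gives the closure an owned handle instead of a
        // borrow of `self`, which satisfies the 'static bound on thread::spawn.
        let files = Arc::clone(&self.files);
        thread::spawn(move || {
            *files.lock().unwrap() += 1;
        })
    }
}

fn main() {
    let mut c = Count { files: Arc::new(Mutex::new(0)) };
    c.start().join().unwrap();
    println!("files = {}", *c.files.lock().unwrap());
}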
I've found a solution: sharing the statistics behind an Arc<Mutex<...>> (a guarded reference) does the trick:
#![feature(specialization)]
use std::{thread, time};
use std::sync::{Arc, Mutex};
extern crate crossbeam_channel as channel;
use channel::{Sender, Receiver, TryRecvError};
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
pub fn counter(
statistics: Arc<Mutex<Statistics>>,
cancel: &Receiver<()>,
) {
for _ in 1..15 {
thread::sleep(time::Duration::from_millis(100));
{
let mut s = statistics.lock().unwrap();
s.files += 1;
}
match cancel.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
println!("Terminating.");
break;
}
Err(TryRecvError::Empty) => {}
}
}
{
let mut s = statistics.lock().unwrap();
s.errors.push(String::from("Foo"));
}
}
#[pyclass]
#[derive(Debug)]
pub struct Count {
exit_called: bool,
statistics: Arc<Mutex<Statistics>>,
thr: Option<thread::JoinHandle<()>>,
cancel: Option<Sender<()>>,
}
#[pymethods]
impl Count {
#[new]
fn __new__(obj: &PyRawObject) {
obj.init(Count {
exit_called: false,
statistics: Arc::new(Mutex::new(Statistics {
files: 0,
errors: Vec::new(),
})),
thr: None,
cancel: None,
});
}
#[getter]
fn statistics(&self) -> PyResult<u32> {
let s = Arc::clone(&self.statistics).lock().unwrap().files;
Ok(s)
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&'p mut self) -> PyResult<()> {
let statistics = self.statistics.clone();
let (sender, receiver) = channel::bounded(1);
self.cancel = Some(sender);
self.thr = Some(thread::spawn(move || {
counter(statistics, &receiver)
}));
Ok(())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
let _ = self.cancel.as_ref().unwrap().send(());
self.thr.take().map(thread::JoinHandle::join);
let gil = GILGuard::acquire();
self.exit_called = true;
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pyproto]
impl pyo3::class::PyObjectProtocol for Count {
fn __str__(&self) -> PyResult<String> {
Ok(format!("{:?}", self))
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
Ok(())
}
Now I can run the following code:
import time
import count
c = count.Count()
with c:
for _ in range(5):
print(c.statistics)
time.sleep(0.1)
As the example shows, thread cancellation also works, although a nicer solution might be to use the thread_control crate.
I'm trying to implement a buffer with a single producer and a single consumer. I have only used POSIX semaphores before; they're not available in Rust's standard library, so I'm trying to solve this classic exercise with Rust's sync primitives (Mutex, Condvar, Barrier, ...) rather than with channels.
The code behaves irregularly: sometimes it runs fine, sometimes it stops at some number, and sometimes it never starts counting at all.
Things appear to work better if the main thread waits one second before sending the Condvar notification, but that still doesn't guarantee the program won't deadlock.
How can this program be fixed? Am I misunderstanding Condvars?
use std::thread;
use std::sync::{Arc, Condvar, Mutex};
struct Buffer {
is_data: Mutex<bool>,
is_data_cv: Condvar,
is_space: Mutex<bool>,
is_space_cv: Condvar,
buffer: Mutex<i32>,
}
fn producer(buffer: Arc<Buffer>) {
for i in 0..50 {
loop {
let mut is_space = buffer
.is_space_cv
.wait(buffer.is_space.lock().unwrap())
.unwrap();
if *is_space {
{
let mut hueco = buffer.buffer.lock().unwrap();
*hueco = i;
}
*is_space = false;
{
let mut is_data = buffer.is_data.lock().unwrap();
*is_data = true;
}
buffer.is_data_cv.notify_one();
break;
}
}
}
}
fn consumer(buffer: Arc<Buffer>) {
for i in 0..50 {
loop {
let mut is_data = buffer
.is_data_cv
.wait(buffer.is_data.lock().unwrap())
.unwrap();
if *is_data {
{
let hueco = buffer.buffer.lock().unwrap();
println!("{}", *hueco);
}
*is_data = false;
{
let mut is_space = buffer.is_space.lock().unwrap();
*is_space = true;
}
buffer.is_space_cv.notify_one();
break;
}
}
}
}
fn main() {
let buffer = Arc::new(Buffer {
is_data: Mutex::new(false),
is_data_cv: Condvar::new(),
is_space: Mutex::new(true),
is_space_cv: Condvar::new(),
buffer: Mutex::new(0),
});
let b = buffer.clone();
let p = thread::spawn(move || {
producer(b);
});
let b = buffer.clone();
let c = thread::spawn(move || {
consumer(b);
});
//thread::sleep_ms(1000);
buffer.is_space_cv.notify_one();
c.join();
}
I would encourage you to create smaller methods and reuse existing Rust types such as Option. This will allow you to simplify your code quite a bit — only one Mutex and one Condvar:
use std::thread;
use std::sync::{Arc, Condvar, Mutex};
#[derive(Debug, Default)]
struct Buffer {
data: Mutex<Option<i32>>,
data_cv: Condvar,
}
impl Buffer {
fn insert(&self, val: i32) {
let mut lock = self.data.lock().expect("Can't lock");
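// Condvar::wait releases the lock while blocked and may wake spuriously, so the condition is re-checked in a loop.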
while lock.is_some() {
lock = self.data_cv.wait(lock).expect("Can't wait");
}
*lock = Some(val);
self.data_cv.notify_one();
}
fn remove(&self) -> i32 {
let mut lock = self.data.lock().expect("Can't lock");
while lock.is_none() {
lock = self.data_cv.wait(lock).expect("Can't wait");
}
let val = lock.take().unwrap();
self.data_cv.notify_one();
val
}
}
fn producer(buffer: &Buffer) {
for i in 0..50 {
println!("p: {}", i);
buffer.insert(i);
}
}
fn consumer(buffer: &Buffer) {
for _ in 0..50 {
let val = buffer.remove();
println!("c: {}", val);
}
}
fn main() {
let buffer = Arc::new(Buffer::default());
let b = buffer.clone();
let p = thread::spawn(move || {
producer(&b);
});
let b = buffer.clone();
let c = thread::spawn(move || {
consumer(&b);
});
c.join().expect("Consumer had an error");
p.join().expect("Producer had an error");
}
If you wanted to have a bit more performance (benchmark to see if it's worth it), you could have Condvars for the "empty" and "full" conditions separately:
#[derive(Debug, Default)]
struct Buffer {
data: Mutex<Option<i32>>,
is_empty: Condvar,
is_full: Condvar,
}
impl Buffer {
fn insert(&self, val: i32) {
let mut lock = self.data.lock().expect("Can't lock");
while lock.is_some() {
lock = self.is_empty.wait(lock).expect("Can't wait");
}
*lock = Some(val);
self.is_full.notify_one();
}
fn remove(&self) -> i32 {
let mut lock = self.data.lock().expect("Can't lock");
while lock.is_none() {
lock = self.is_full.wait(lock).expect("Can't wait");
}
let val = lock.take().unwrap();
self.is_empty.notify_one();
val
}
}
To improve concurrency further, you can add more slots to the buffer. The following example also supports multiple producers and consumers.
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
use std::thread;
const MAX: usize = 10;
struct Buffer {
inner: Mutex<BufferInner>,
fill_cond: Condvar,
empty_cond: Condvar,
}
impl Buffer {
fn new() -> Self {
Buffer {
inner: Mutex::new(BufferInner {
data: [Option::None; MAX],
filled: 0,
used: 0,
count: 0,
}),
fill_cond: Condvar::new(),
empty_cond: Condvar::new(),
}
}
}
struct BufferInner {
data: [Option<i32>; MAX],
filled: usize,
used: usize,
count: usize,
}
impl BufferInner {
fn put(&mut self, value: i32) {
self.data[self.filled] = Some(value);
self.filled = (self.filled + 1) % MAX;
self.count += 1;
}
fn get(&mut self) -> i32 {
let tmp: Option<i32> = self.data[self.used];
self.used = (self.used + 1) % MAX;
self.count -= 1;
tmp.unwrap()
}
}
fn producer(buffer: &Buffer) {
for i in 0..20 {
let mut guard = buffer.inner.lock().unwrap();
while guard.count == MAX {
guard = buffer.empty_cond.wait(guard).unwrap();
}
guard.put(i);
println!("producer: {}", i);
buffer.fill_cond.notify_one();
}
}
fn consumer(buffer: &Buffer) {
for _ in 0..20 {
let mut guard: MutexGuard<BufferInner> = buffer.inner.lock().unwrap();
while guard.count == 0_usize {
guard = buffer.fill_cond.wait(guard).unwrap();
}
let value = guard.get();
println!("consumer: {}", value);
buffer.empty_cond.notify_one();
}
}
fn main() {
let buffer = Arc::new(Buffer::new());
let buffer1 = Arc::clone(&buffer);
let p1 = thread::spawn(move || producer(&buffer));
let c1 = thread::spawn(move || consumer(&buffer1));
p1.join().unwrap();
c1.join().unwrap();
}
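Since the question starts from POSIX semaphores, it may help to see that a counting semaphore itself is just a Mutex<usize> plus a Condvar, using the same wait-in-a-loop pattern as the buffers above. A minimal sketch (illustrative only, std types only):
use std::sync::{Condvar, Mutex};

// A counting semaphore built from std primitives (not a std API).
struct Semaphore {
    count: Mutex<usize>,
    cv: Condvar,
}

impl Semaphore {
    fn new(initial: usize) -> Self {
        Semaphore { count: Mutex::new(initial), cv: Condvar::new() }
    }

    // P / wait: block until a permit is available, then take it.
    fn acquire(&self) {
        let mut count = self.count.lock().unwrap();
        while *count == 0 {
            count = self.cv.wait(count).unwrap();
        }
        *count -= 1;
    }

    // V / signal: return a permit and wake one waiter.
    fn release(&self) {
        let mut count = self.count.lock().unwrap();
        *count += 1;
        self.cv.notify_one();
    }
}

fn main() {
    use std::sync::Arc;
    use std::thread;

    let sem = Arc::new(Semaphore::new(0));
    let sem2 = Arc::clone(&sem);
    let t = thread::spawn(move || {
        // Blocks until the main thread releases a permit.
        sem2.acquire();
        println!("acquired");
    });
    sem.release();
    t.join().unwrap();
}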
How can you call Linux's socketpair() function in Rust?
I was not able to find it in the documentation.
This is how it works:
use std::io;
use std::libc;
use std::libc::consts::os::bsd44;
use std::libc::funcs::bsd43;
extern {
fn socketpair(
domain: libc::c_int,
typ: libc::c_int,
protocol: libc::c_int,
sv: *libc::c_int
) -> libc::c_int;
}
struct PairedStream {
socket: i32
}
impl PairedStream {
fn new(fd: i32) -> PairedStream {
PairedStream {socket: fd}
}
fn send(&self, buf: &[u8]) -> Result<(), io::IoError> {
let res = unsafe {
let ptr = buf.as_ptr() as *mut libc::c_void;
let len = buf.len() as u64;
bsd43::send(self.socket, ptr, len, 0) as uint == buf.len()
};
if res {
return Ok(());
}
else {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
}
fn read(&self, buf: &mut [u8]) -> Result<uint, io::IoError> {
let len = unsafe {
let ptr = buf.as_ptr() as *mut libc::c_void;
let len = buf.len() as u64;
bsd43::recv(self.socket, ptr, len, 0)
};
if len == -1 {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
else {
return Ok(len as uint);
}
}
}
struct SocketPair;
impl SocketPair {
fn new() -> (Result<(PairedStream, PairedStream), io::IoError>) {
let AF_LOCAL = 1;
let sv: [i32, ..2] = [-1, -1];
let _type = bsd44::SOCK_DGRAM;
let res = unsafe {
socketpair(AF_LOCAL, _type, 0, sv.as_ptr()) == 0
};
if res {
let s1 = PairedStream::new(sv[0]);
let s2 = PairedStream::new(sv[1]);
return Ok((s1, s2));
}
else {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
}
}
fn main() {
let sockets = SocketPair::new();
match sockets {
Ok((s1, s2)) => {
let mut buf = [9,8,7,6,5,4,3,2,1];
s1.send([1,2,3,4,5,6,7,8,9]);
s2.read(buf);
println!("{} {}", buf[0], buf[8])
}
Err(ioerr) => {}
}
}
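Note that the answer above targets a pre-1.0 Rust toolchain; std::libc, uint and the [T, ..2] array syntax no longer exist. On current Rust, std::os::unix::net provides UnixDatagram::pair (and UnixStream::pair for a stream pair), which give you a connected pair of Unix-domain sockets without any unsafe code. A minimal sketch for a Unix target:
use std::os::unix::net::UnixDatagram;

fn main() -> std::io::Result<()> {
    // A connected pair of unnamed Unix-domain datagram sockets.
    let (s1, s2) = UnixDatagram::pair()?;

    s1.send(&[1, 2, 3, 4, 5, 6, 7, 8, 9])?;

    let mut buf = [0u8; 9];
    let n = s2.recv(&mut buf)?;
    println!("{} {}", buf[0], buf[n - 1]);
    Ok(())
}
If you need raw control over the domain, type, or protocol arguments, the libc crate also exposes socketpair directly for use in an unsafe block.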