Rust lock-free ring buffer doesn't work in release mode - rust

I implemented a lock-free ring buffer. It passes my tests in debug builds, but in release mode it frequently fails to work.
use std::path::Display;
use std::sync::Arc;
#[derive(Debug)]
pub struct RingBuffer<T, const m_size: usize> {
idx_head: usize,
idx_tail: usize,
m_data: [T; m_size],
}
// Minimal FIFO queue interface, implemented by RingBuffer and SharedRingBuffer.
pub trait Queue<T> {
// Creates an empty queue.
fn new_empty() -> Self;
// Appends `value`; returns false when the queue is full.
fn push(&mut self, value: T) -> bool;
// Removes the oldest element; None when empty. The returned reference points
// into the queue's own storage.
fn pop(&mut self) -> Option<&T>;
fn is_full(&self) -> bool;
fn is_empty(&self) -> bool;
}
impl<T, const Size: usize> Queue<T> for RingBuffer<T, Size>
{
fn new_empty() -> Self {
RingBuffer::<T, Size> {
idx_head: 0,
idx_tail: 0,
// NOTE(review): `mem::zeroed` is only sound for T where the all-zero bit
// pattern is a valid value (e.g. integers); for most other types this is
// instant undefined behavior. MaybeUninit is the right tool here.
m_data: array_init::array_init(|_| {
unsafe {
std::mem::zeroed()
}
}),
}
}
// Producer-side push. Fails (returns false) when advancing head would collide
// with tail, so the effective capacity is Size - 1.
fn push(&mut self, value: T) -> bool {
let mut head = self.idx_head + 1;
if head == Size {
head = 0;
}
if head == self.idx_tail {
return false;
}
self.m_data[self.idx_head] = value;
// "Publish" the element by advancing the head index.
// NOTE(review): plain non-atomic store — another thread may observe the
// index update before the data write, or never observe it at all.
self.idx_head = head;
return true;
}
// Consumer-side pop. Returns a reference into the buffer; the slot is
// logically freed immediately, so the producer may overwrite it while the
// caller still holds the reference.
fn pop(&mut self) -> Option<&T> {
let mut tail = self.idx_tail;
if self.idx_head == tail {
return None;
}
let res = &self.m_data[tail];
tail += 1;
if tail == Size {
tail = 0;
}
self.idx_tail = tail;
return Some(res);
}
fn is_full(&self) -> bool {
self.idx_tail == (self.idx_head + 1) % Size
}
fn is_empty(&self) -> bool {
self.idx_head == self.idx_tail
}
}
// Shared handle to a single RingBuffer.
// NOTE(review): Arc provides shared ownership only, not synchronization; the
// mutable access below is obtained with Arc::get_mut_unchecked, which is UB
// while other clones exist (see the Miri report further down this page).
pub struct SharedRingBuffer<T, const m_size: usize> {
pub ringbuffer: Arc<RingBuffer<T, m_size>>,
}
impl<T, const Size: usize> Clone for SharedRingBuffer<T, Size> {
fn clone(&self) -> Self {
Self {
ringbuffer: self.ringbuffer.clone(),
}
}
}
impl<T, const Size: usize, > Queue<T> for SharedRingBuffer<T, Size> {
fn new_empty() -> Self {
Self {
ringbuffer: Arc::new(RingBuffer::<T, Size>::new_empty()),
}
}
// NOTE(review): Arc::get_mut_unchecked requires that no other clone of the
// Arc is being accessed. Here two threads each hold a clone and call this
// concurrently, producing two aliasing &mut — undefined behavior. This is
// exactly the data race Miri reports below, and why release builds (where
// the optimizer exploits the aliasing assumption) misbehave.
fn push(&mut self, value: T) -> bool {
unsafe {
(*Arc::get_mut_unchecked(&mut self.ringbuffer)).push(value)
}
}
fn pop(&mut self) -> Option<&T> {
unsafe {
(*Arc::get_mut_unchecked(&mut self.ringbuffer)).pop()
}
}
fn is_full(&self) -> bool {
self.ringbuffer.is_full()
}
fn is_empty(&self) -> bool {
self.ringbuffer.is_empty()
}
}
////////////////////// for test//////////////////////////
// Test driver: a consumer thread pops until it sees the -1 sentinel while the
// main thread pushes 0..99.
// NOTE(review): as posted this does not compile on its own — `Duration` is
// used without `use std::time::Duration;`.
fn test_speed1() {
let mut q: SharedRingBuffer<i32, 8> = SharedRingBuffer::new_empty();
let mut t0 = std::time::SystemTime::now();
let t = {
let mut q = q.clone();
std::thread::spawn(move || {
// Busy-spin until an element is available.
loop {
let t = match q.pop() {
None => {
// std::thread::sleep(Duration::from_millis(10));
continue;
}
Some(res) => res
};
// -1 is the in-band shutdown sentinel.
if *t == -1 {
break;
}
std::thread::sleep(Duration::from_millis(1));
}
let now = std::time::SystemTime::now();
println!("res: {}", now.duration_since(t0).unwrap().as_millis());
})
};
for i in 0..99 {
// Spin until the push succeeds (buffer has room).
loop {
if q.push(i) {
// std::thread::sleep(Duration::from_millis(10));
break;
}
}
}
// NOTE(review): this push can fail when the buffer is full; the bool result
// is ignored, so the sentinel may never be delivered.
q.push(-1);
t.join().unwrap();
}
When I add std::thread::sleep(Duration::from_millis(10)) around the q.push and q.pop calls, it works well.
rustc 1.67.0-nightly (95a3a7277 2022-10-31)
binary: rustc
commit-hash: 95a3a7277b44bbd2dd3485703d9a05f64652b60e
commit-date: 2022-10-31
host: x86_64-pc-windows-msvc
release: 1.67.0-nightly
LLVM version: 15.0.4
I expect the RingBuffer can work well.
The equivalent code is:
// Reference implementation using a std mpsc channel: same producer/consumer
// pattern, with -1 again acting as the shutdown sentinel.
fn test_speed2() {
let (send, recv) = channel::<i32>();
// NOTE(review): `SharedValue` is not defined in this snippet; `is_run` and
// `t0` are created but never actually read.
let mut is_run = SharedValue::new(true);
let mut t0 = std::time::SystemTime::now();
let t = {
let is_run = is_run.clone();
std::thread::spawn(move || {
loop {
let t = match recv.recv() {
Err(e) => {
break;
}
Ok(res) => res
};
if t == -1 {
break;
}
std::thread::sleep(Duration::from_millis(1));
}
let now = std::time::SystemTime::now();
// println!("res: {}", now.duration_since(t0).unwrap().as_millis());
})
};
for i in 0..99 {
send.send(i).unwrap();
}
send.send(-1).unwrap();
t.join().unwrap();
}
I hope the ring buffer can replace the channel for communicating between two threads, because the ring buffer is lock-free and faster.

Your code causes undefined behavior by creating two mutable references to the same object at the same time via Arc::get_mut_unchecked(). It looks like this was even your intention, but it is blatantly violating Rust's rules. Even when using unsafe, you cannot violate the requirement that mutable references are exclusive.
Running your code with cargo miri reports this undefined behavior:
error: Undefined Behavior: Data race detected between Read on thread `<unnamed>` and Write on thread `main` at alloc1894+0x10
--> bar/src/main.rs:45:12
|
45 | if self.idx_head == tail {
| ^^^^^^^^^^^^^ Data race detected between Read on thread `<unnamed>` and Write on thread `main` at alloc1894+0x10
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `<RingBuffer<i32, 8> as Queue<i32>>::pop` at bar/src/main.rs:45:12
note: inside `<SharedRingBuffer<i32, 8> as Queue<i32>>::pop` at bar/src/main.rs:89:18
--> bar/src/main.rs:89:18
|
89 | unsafe { (*Arc::get_mut_unchecked(&mut self.ringbuffer)).pop() }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
note: inside closure at bar/src/main.rs:108:31
--> bar/src/main.rs:108:31
|
108 | let t = match q.pop() {
| ^^^^^^^
You will need to rethink your design. You'll probably need a foundation like this to make it safe to modify between threads:
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;
use std::sync::atomic::AtomicUsize;
// Suggested foundation: atomic indices plus interior-mutable, possibly
// uninitialized slots, so cross-thread mutation can be expressed without
// creating aliasing &mut references.
pub struct RingBuffer<T, const SIZE: usize> {
idx_head: AtomicUsize,
idx_tail: AtomicUsize,
m_data: [UnsafeCell<MaybeUninit<T>>; SIZE],
}

This is actually caused by the CPU/compiler caching the index values. The workaround I found is as follows:
// Poster's workaround: volatile reads/writes of the two indices.
// NOTE(review): volatile only stops the compiler from caching or eliding the
// accesses; it provides no atomicity and no inter-thread ordering, so by
// Rust's rules this is still a data race (UB). Atomics are the correct tool.
fn push(&mut self, value: T) -> bool {
let mut head = unsafe {
std::ptr::read_volatile(&self.idx_head) + 1
};
let tail = unsafe {
std::ptr::read_volatile(&self.idx_tail)
};
if head == Size {
head = 0;
}
if head == tail {
return false;
}
self.m_data[self.idx_head] = value;
unsafe {
std::ptr::write_volatile(&mut self.idx_head, head);
}
return true;
}
// Consumer side of the volatile workaround.
// NOTE(review): same caveat as push — volatile is not a synchronization
// primitive; this remains a data race under the Rust memory model.
fn pop(&mut self) -> Option<&T> {
let mut tail = unsafe {
std::ptr::read_volatile(&self.idx_tail)
};
let head = unsafe {
std::ptr::read_volatile(&self.idx_head)
};
if head == tail {
return None;
}
let res = &self.m_data[tail];
tail += 1;
if tail == Size {
tail = 0;
}
unsafe {
std::ptr::write_volatile(&mut self.idx_tail, tail);
}
return Some(res);
}

Related

ConnectionReset error message while receiving data through socket

I'm new to Rust and have implemented a network receiver thread. I'm testing my implementation by sending data to the same socket address the receiver socket is bound to (making use of dynamic binding by binding the socket to port 0). But I'm getting thread 'google' panicked at 'Didn't receive data: Os { code: 10054, kind: ConnectionReset, message: "An existing connection was forcibly closed by the remote host." }', src\ethernet_interface.rs:62:42. This is my code:
src/main.rs
mod ethernet_interface;
mod WWW_sim_interface;
use crate::WWW_sim_interface::WWWSimInterface;
use std::{thread, time};
fn main() {
// Build the interface, let the receiver thread run for two seconds, then
// signal shutdown and wait for the thread to exit.
let www_sim_interface = WWWSimInterface::new(String::from("google"));
let mut www_sim_interface_runnable = www_sim_interface.start();
let two_seconds = time::Duration::from_secs(2);
thread::sleep(two_seconds);
www_sim_interface_runnable.terminate_WWW_sim();
// NOTE(review): join() returns a Result that is silently dropped here.
www_sim_interface_runnable.join_handle.join();
}
src/WWW_sim_interface.rs
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::thread;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::mem;
use crate::ethernet_interface::EthernetInterface;
// Owns the receive socket and the cross-thread termination flag.
pub struct WWWSimInterface {
target_ID: String,
// Polled by the receiver loop each iteration (Ordering::Relaxed).
terminate_flag: AtomicBool,
eth_interface: Option<EthernetInterface>,
}
// Returned by start(): the spawned thread's handle plus a shared reference
// back to the interface so it can be terminated later.
pub struct RunningWWWSimInterface {
pub join_handle: thread::JoinHandle<()>,
WWW_sim_interface: Arc<WWWSimInterface>,
}
impl WWWSimInterface {
pub fn new(target_ID: String) -> WWWSimInterface {
let mut WWW_sim_interface = WWWSimInterface {
target_ID: target_ID,
terminate_flag: AtomicBool::new(false),
eth_interface: Some(EthernetInterface::new()),
};
// Bind to 127.0.0.1:0 — port 0 asks the OS for an ephemeral port.
WWW_sim_interface.eth_interface.as_mut().expect("Error").setup_receiver(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0));
WWW_sim_interface
}
// Moves self into an Arc and spawns the receiver loop on a thread named
// after target_ID.
pub fn start(self) -> RunningWWWSimInterface {
let WWW_sim_interface = Arc::new(self);
let join_handle = {
let WWW_sim_interface = WWW_sim_interface.clone();
thread::Builder::new().name(WWW_sim_interface.target_ID.clone()).spawn(move || WWW_sim_interface.run()).ok().unwrap()
};
RunningWWWSimInterface {
join_handle,
WWW_sim_interface,
}
}
// Receiver loop. recv_data blocks, so the terminate_flag alone cannot stop
// it — termination also self-sends msg_ID 1234 to unblock the socket.
pub fn run(&self) {
let mut buff: [u8; 2048] = [0; 2048];
let mut msg_ID: u16;
println!("started receiver thread");
while !self.terminate_flag.load(Ordering::Relaxed) {
let data_len = self.eth_interface.as_ref().expect("Error").recv_data(&mut buff);
if data_len < 2 {
continue;
}
// NOTE(review): vec_buff is built but never used.
let vec_buff = buff[..data_len].to_vec();
// First two bytes are a big-endian message id.
let (int_bytes, rest) = buff.split_at(mem::size_of::<u16>());
msg_ID = u16::from_be_bytes(int_bytes.try_into().unwrap());
if msg_ID == 1234 {
break;
}
}
}
}
impl RunningWWW_simInterface {
pub fn terminate_WWWSim(&mut self) {
self.WWW_sim_interface.terminate_flag.store(true, Ordering::Relaxed);
let msg_ID: u16 = 1234;
// self.WWW_sim_interface.eth_interface.as_ref().expect("Error").send_data(&msg_ID.to_be_bytes());
self.WWW_sim_interface.eth_interface.as_ref().expect("Error").send_self(&msg_ID.to_be_bytes());
}
}
src/ethernet_interface.rs
use std::net::{UdpSocket, SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr};
use std::net::IpAddr::V4;
use std::net::IpAddr::V6;
// Thin wrapper over one UDP socket plus the addresses used for receiving and
// sending. All fields start as None until setup_receiver/setup_sender run.
pub struct EthernetInterface {
socket: Option<UdpSocket>,
recv_IP_sock_addr: Option<SocketAddr>,
send_IP_sock_addr: Option<SocketAddr>,
}
impl EthernetInterface {
pub fn new() -> EthernetInterface {
EthernetInterface {
socket: None,
recv_IP_sock_addr: None,
send_IP_sock_addr: None,
}
}
// Binds the receive socket. Multicast addresses (first octet 224..=239) are
// rebound to the unspecified address and the group is joined after binding.
pub fn setup_receiver(&mut self, mut recv_IP_sock_addr: SocketAddr) {
let ip_addr = recv_IP_sock_addr.ip();
let first_octect: u8 = match ip_addr {
V4(ip4_addr) => ip4_addr.octets().to_vec()[0],
V6(ip6_addr) => ip6_addr.octets().to_vec()[0]
};
if first_octect > 223 && first_octect < 240 {
match ip_addr {
V4(ip4_addr) => recv_IP_sock_addr.set_ip(IpAddr::V4(Ipv4Addr::UNSPECIFIED)),
V6(ip6_addr) => recv_IP_sock_addr.set_ip(IpAddr::V6(Ipv6Addr::UNSPECIFIED))
};
}
self.recv_IP_sock_addr = Some(recv_IP_sock_addr);
self.socket = Some(UdpSocket::bind(self.recv_IP_sock_addr.unwrap()).unwrap());
if first_octect > 223 && first_octect < 240 {
let ip_addr = self.recv_IP_sock_addr.unwrap().ip();
match ip_addr {
V4(ip4_addr) => self.socket.as_ref().unwrap().join_multicast_v4(&ip4_addr, &Ipv4Addr::UNSPECIFIED).ok(),
V6(ip6_addr) => self.socket.as_ref().unwrap().join_multicast_v6(&ip6_addr, 0).ok()
};
}
}
// Records the destination and, for multicast targets, tweaks loop/TTL.
pub fn setup_sender(&mut self, send_IP_sock_addr: SocketAddr) {
let ip_addr = send_IP_sock_addr.ip();
let first_octect = match ip_addr {
V4(ip4_addr) => ip4_addr.octets().to_vec()[0],
V6(ip6_addr) => ip6_addr.octets().to_vec()[0]
};
self.send_IP_sock_addr = Some(send_IP_sock_addr);
if first_octect > 223 && first_octect < 240 {
self.socket.as_ref().unwrap().set_multicast_loop_v4(false).expect("set_multicast_loop_v4 call failed");
self.socket.as_ref().unwrap().set_multicast_ttl_v4(8).expect("set_multicast_ttl_v4 call failed");
}
}
// NOTE(review): expect() turns any receive error into a panic. On Windows a
// UDP socket can report WSAECONNRESET (os error 10054) after an earlier send
// to a port nobody listens on — which matches the panic in the question, so
// this error likely needs to be handled rather than unwrapped.
pub fn recv_data(&self, buff: &mut [u8]) -> usize {
let (number_of_bytes, src_addr) = self.socket.as_ref().unwrap().recv_from(buff)
.expect("Didn't receive data");
println!("recvd data");
number_of_bytes
}
pub fn send_data(&self, buff: &[u8]) {
self.socket.as_ref().unwrap().send_to(buff, self.send_IP_sock_addr.unwrap()).expect("couldn't send data");
}
pub fn get_host_bound_port(&self) -> u16 {
self.socket.as_ref().unwrap().local_addr().unwrap().port()
}
// Blocks for one datagram and remembers its source as the send target.
pub fn get_src_addr(&mut self) {
let mut buff: [u8; 2048] = [0; 2048];
let (_, src_addr) = self.socket.as_ref().unwrap().recv_from(&mut buff)
.expect("Didn't receive data");
self.send_IP_sock_addr = Some(src_addr);
}
// Sends to our own bound address (used to wake up the blocking receiver).
pub fn send_self(&self, buff: &[u8]) {
self.socket.as_ref().unwrap().send_to(buff, self.recv_IP_sock_addr.unwrap()).expect("couldn't end self data");
println!("sent data");
}
}

Can't understand rust lifetime conflict

I was doing a dummy app to get a grasp on Rust concepts.
While doing an XML structure I got the error
cannot infer an appropriate lifetime for lifetime parameter in
function call due to conflicting requirements
The definition is
impl<'a> XmlFile<'a>
and
pub fn get_node<'b>(self, node: &'b [u8]) -> &'b [u8]
From what I understand, the Rust compiler does not like that the return variable can be dropped after the function ends, if the XML file drops at a different time (since they have 'a and 'b lifetimes).
But if I put the same, I get the error
lifetime 'a is already in scope
, so I don't see a way to solve the error.
Any idea what I am missing? I think I must still be lacking some Rust concept.
Edit: Misconception from my part adding the code that causes the problem
// Borrowed view over an XML document; both slices borrow from the same
// underlying file buffer with lifetime 'a.
#[allow(unused_parens)]
pub struct XmlFile<'a> {
last_open_node: &'a[u8],
last_published: String,
index_pos: u64,
content: &'a[u8],
}
// NOTE(review): this is the question's original, non-compiling code — kept
// verbatim. Problems visible here: `&[0: u8]` / `&[0:u8]` is not valid slice
// syntax (should be `&[0u8]`); get_node mutates self.index_pos and locals
// through `&self` / non-mut bindings; and get_node's signature promises a
// sub-slice of `node` while it actually returns a sub-slice of self.content
// (the answer below explains this lifetime mismatch).
impl<'a> XmlFile<'a> {
pub fn new<'b: 'a>(file: &'b [u8]) -> XmlFile<'b> {
let new_xml = XmlFile {
last_open_node: &[0: u8],
last_published: "".to_string(),
index_pos: 0,
content: file,
};
return new_xml;
}
// Scans for "<node ..." and returns the span between '<' and '>'.
pub fn get_node<'b: 'a>(&self, node: &'b [u8]) -> &'b [u8] {
let buf_index: u64 = 0;
let has_matched: bool = false;
self.index_pos = 0;
for c in self.content {
self.index_pos += 1;
if (c == &b'<') {
buf_index = self.index_pos;
while (c != &b' ') {
for b in node {
if b == &self.content[buf_index as usize] {
has_matched = true;
buf_index += 1
} else {
has_matched = false;
continue;
}
}
if has_matched {
while(self.content[buf_index as usize] != b'>'){
buf_index+=1;
}
let r = &self.content[self.index_pos as usize..buf_index as usize];
return r;
}
}
}
}
return &[0 : u8];
}
pub fn get_rss_version<'b:'a>(&self) -> Result<u64 , &'static str>{
let found_slice = Self::get_node(&self, "rss".as_bytes());
if(found_slice != &[0:u8]){
let version_value = Self::get_value(found_slice);
if(version_value.is_ok()){
return Ok(version_value.unwrap()) ;
}
else{
return Err("Couldn't retrieve version from tag");
}
}
else{
println!("Couldn't find tag <rss");
return Err("Couldn't find tag <rss");
}
}
}
Let's look at your signature for get_node:
pub fn get_node<'b: 'a>(&mut self, node: &'b [u8]) -> &'b [u8] { ... }
and what you're actually returning within this method:
let r = &self.content[self.index_pos as usize..buf_index as usize];
return r;
The signature for get_node indicates this method will be returning a sub-slice of node, but you're actually returning a sub-slice of the XmlFile's content.
One solution to the problem is to understand that the return value isn't a part of node, but instead a part of self.content. Therefore, we can change the signature to:
pub fn get_node<'b>(&mut self, node: &'b [u8]) -> &'a [u8] { ... }
In this case we can even elide the manual specification of lifetimes entirely:
pub fn get_node(&mut self, node: &[u8]) -> &[u8] { ... }
Here's a cleaned up version of your get_node method that actually compiles:
// Answer's cleaned-up version: elided lifetimes tie the return value to
// &mut self (i.e. to self.content), matching what is actually returned.
pub fn get_node(&mut self, node: &[u8]) -> &[u8] {
let mut buf_index: u64;
let mut has_matched: bool = false;
self.index_pos = 0;
for c in self.content {
self.index_pos += 1;
if c == &b'<' {
buf_index = self.index_pos;
// NOTE(review): `c` never changes inside this while loop; if the name
// never matches and `c` != b' ', this can spin forever — worth verifying.
while c != &b' ' {
for b in node {
if b == &self.content[buf_index as usize] {
has_matched = true;
buf_index += 1
} else {
has_matched = false;
continue;
}
}
if has_matched {
// Advance to the closing '>' and return the span after '<'.
while self.content[buf_index as usize] != b'>' {
buf_index += 1;
}
let r = &self.content[self.index_pos as usize..buf_index as usize];
return r;
}
}
}
}
// Sentinel "not found" value (a one-byte zero slice).
return &[0u8];
}

PyContextProtocol example for pyo3?

In the __enter__ method I want to return an object which is accessible in Rust and Python, so that Rust is able to update values in the object and Python can read the updated values.
I would like to have something like this:
#![feature(specialization)]
use std::thread;
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny, PyDict};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
use pyo3::wrap_pyfunction;
// NOTE(review): this targets a pre-0.9 pyo3 API (PyRawObject, #[pyproto],
// PyContextProtocol, nightly `specialization`); it will not build against
// modern pyo3, where context managers are plain __enter__/__exit__ #[pymethods].
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
// Worker: bumps the counter and records a dummy error.
fn counter(
root_path: &str,
statistics: &mut Statistics,
) {
statistics.files += 1;
statistics.errors.push(String::from("Foo"));
}
// One-shot variant: runs counter without the GIL, returns a result dict.
#[pyfunction]
pub fn count(
py: Python,
root_path: &str,
) -> PyResult<PyObject> {
let mut statistics = Statistics {
files: 0,
errors: Vec::new(),
};
let rc: std::result::Result<(), std::io::Error> = py.allow_threads(|| {
counter(root_path, &mut statistics);
Ok(())
});
let pyresult = PyDict::new(py);
match rc {
Err(e) => { pyresult.set_item("error", e.to_string()).unwrap();
return Ok(pyresult.into())
},
_ => ()
}
pyresult.set_item("files", statistics.files).unwrap();
pyresult.set_item("errors", statistics.errors).unwrap();
Ok(pyresult.into())
}
// Context-manager variant: owns the worker thread handle and its stats.
#[pyclass]
#[derive(Debug)]
pub struct Count {
root_path: String,
exit_called: bool,
thr: Option<thread::JoinHandle<()>>,
statistics: Statistics,
}
#[pymethods]
impl Count {
#[new]
fn __new__(
obj: &PyRawObject,
root_path: &str,
) {
obj.init(Count {
root_path: String::from(root_path),
exit_called: false,
thr: None,
statistics: Statistics {
files: 0,
errors: Vec::new(),
},
});
}
// Snapshot getter so Python can poll progress.
#[getter]
fn statistics(&self) -> PyResult<Statistics> {
Ok(Statistics { files: self.statistics.files,
errors: self.statistics.errors.to_vec(), })
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&mut self) -> PyResult<Py<Count>> {
let gil = GILGuard::acquire();
// NOTE(review): this closure borrows `self`, but thread::spawn requires a
// 'static closure — this is the E0477 error quoted below the snippet.
self.thr = Some(thread::spawn(|| {
counter(self.root_path.as_ref(), &mut self.statistics)
}));
Ok(PyRefMut::new(gil.python(), *self).unwrap().into())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
// NOTE(review): unwrap() on Option<JoinHandle> moves out of a borrowed
// field; .take() would be needed (as in the working answer below).
self.thr.unwrap().join();
let gil = GILGuard::acquire();
self.exit_called = true;
// Returning true suppresses a ValueError raised inside the with-block.
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
m.add_wrapped(wrap_pyfunction!(count))?;
Ok(())
}
But I'm getting the following error:
error[E0477]: the type `[closure#src/lib.rs:90:39: 92:10 self:&mut &'p mut Count]` does not fulfill the required lifetime
--> src/lib.rs:90:25
|
90 | self.thr = Some(thread::spawn(|| {
| ^^^^^^^^^^^^^
|
= note: type must satisfy the static lifetime
I've found a solution. The use of a guarded reference does the trick:
#![feature(specialization)]
use std::{thread, time};
use std::sync::{Arc, Mutex};
extern crate crossbeam_channel as channel;
use channel::{Sender, Receiver, TryRecvError};
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
// Working version: the worker thread owns an Arc<Mutex<Statistics>> clone and
// a crossbeam cancel channel, so the spawned closure is 'static and the main
// object can still read progress. (Same pre-0.9 pyo3 API as the question.)
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
// Worker loop: bumps `files` ~10x/second until cancelled (or 14 iterations),
// then records a dummy error. The lock is taken briefly per update.
pub fn counter(
statistics: Arc<Mutex<Statistics>>,
cancel: &Receiver<()>,
) {
for _ in 1..15 {
thread::sleep(time::Duration::from_millis(100));
{
let mut s = statistics.lock().unwrap();
s.files += 1;
}
// A message or a disconnected sender both mean "stop".
match cancel.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
println!("Terminating.");
break;
}
Err(TryRecvError::Empty) => {}
}
}
{
let mut s = statistics.lock().unwrap();
s.errors.push(String::from("Foo"));
}
}
#[pyclass]
#[derive(Debug)]
pub struct Count {
exit_called: bool,
// Shared with the worker thread; guarded by the mutex.
statistics: Arc<Mutex<Statistics>>,
thr: Option<thread::JoinHandle<()>>,
// Sender half of the cancel channel created in __enter__.
cancel: Option<Sender<()>>,
}
#[pymethods]
impl Count {
#[new]
fn __new__(obj: &PyRawObject) {
obj.init(Count {
exit_called: false,
statistics: Arc::new(Mutex::new(Statistics {
files: 0,
errors: Vec::new(),
})),
thr: None,
cancel: None,
});
}
// Exposes only the file counter to Python.
#[getter]
fn statistics(&self) -> PyResult<u32> {
let s = Arc::clone(&self.statistics).lock().unwrap().files;
Ok(s)
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&'p mut self) -> PyResult<()> {
// Clone the Arc so the closure owns its data — this is what makes the
// closure 'static and fixes the E0477 from the question.
let statistics = self.statistics.clone();
let (sender, receiver) = channel::bounded(1);
self.cancel = Some(sender);
self.thr = Some(thread::spawn(move || {
counter(statistics, &receiver)
}));
Ok(())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
// Best-effort cancel, then take() the handle so joining doesn't move out
// of the borrowed field.
let _ = self.cancel.as_ref().unwrap().send(());
self.thr.take().map(thread::JoinHandle::join);
let gil = GILGuard::acquire();
self.exit_called = true;
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pyproto]
impl pyo3::class::PyObjectProtocol for Count {
fn __str__(&self) -> PyResult<String> {
Ok(format!("{:?}", self))
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
Ok(())
}
Now I can run the following code:
import time
import count
c = count.Count()
with c:
for _ in range(5):
print(c.statistics)
time.sleep(0.1)
As the example shows thread cancelling also works, although a maybe nicer solution is using the crate thread_control.

Buffer in Rust with Mutex and Condvar

I'm trying to implement a buffer with a single consumer and a single producer. I have only used POSIX Semaphores, however, they're not available in Rust and I'm trying to implement a trivial semaphore problem with Rust sync primitives (Mutex, Condvar, Barrier, ...) but I don't want to use channels.
My code behaves too irregularly, with some cases going well and other times it just stops at some number and in other cases it just doesn't start counting.
Things appear to work better if I wait 1 second in the main thread till I send the Condvar notification but it doesn't guarantee that it's not going to enter a deadlock.
How can this program be fixed? Am I understanding Condvars wrong?
use std::thread;
use std::sync::{Arc, Condvar, Mutex};
// One-slot buffer with separate "has data"/"has space" flags, each guarded by
// its own mutex+condvar, plus a third mutex for the payload.
// NOTE(review): splitting the predicate from the wait like this makes lost
// wakeups possible — see the comments in producer/consumer below.
struct Buffer {
is_data: Mutex<bool>,
is_data_cv: Condvar,
is_space: Mutex<bool>,
is_space_cv: Condvar,
buffer: Mutex<i32>,
}
fn producer(buffer: Arc<Buffer>) {
for i in 0..50 {
loop {
// NOTE(review): this waits on the condvar BEFORE checking the predicate,
// so a notify_one() sent while this thread is not yet waiting is lost —
// the likely cause of the observed hangs. The standard pattern is: lock,
// then `while !predicate { guard = cv.wait(guard) }`.
let mut is_space = buffer
.is_space_cv
.wait(buffer.is_space.lock().unwrap())
.unwrap();
if *is_space {
{
let mut hueco = buffer.buffer.lock().unwrap();
*hueco = i;
}
*is_space = false;
{
let mut is_data = buffer.is_data.lock().unwrap();
*is_data = true;
}
buffer.is_data_cv.notify_one();
break;
}
}
}
}
fn consumer(buffer: Arc<Buffer>) {
for i in 0..50 {
loop {
// NOTE(review): same lost-wakeup hazard as the producer — the wait
// happens before the predicate is checked, so a notification delivered
// while this thread is between waits is dropped.
let mut is_data = buffer
.is_data_cv
.wait(buffer.is_data.lock().unwrap())
.unwrap();
if *is_data {
{
let hueco = buffer.buffer.lock().unwrap();
println!("{}", *hueco);
}
*is_data = false;
{
let mut is_space = buffer.is_space.lock().unwrap();
*is_space = true;
}
buffer.is_space_cv.notify_one();
break;
}
}
}
}
fn main() {
let buffer = Arc::new(Buffer {
is_data: Mutex::new(false),
is_data_cv: Condvar::new(),
is_space: Mutex::new(true),
is_space_cv: Condvar::new(),
buffer: Mutex::new(0),
});
let b = buffer.clone();
let p = thread::spawn(move || {
producer(b);
});
let b = buffer.clone();
let c = thread::spawn(move || {
consumer(b);
});
//thread::sleep_ms(1000);
// Kick-start the producer, which is hopefully already blocked in wait().
// NOTE(review): if the producer has not reached wait() yet this notification
// is lost — which is why sleeping first appears to help.
buffer.is_space_cv.notify_one();
// NOTE(review): join()'s Result is ignored, and `p` is never joined at all.
c.join();
}
I would encourage you to create smaller methods and reuse existing Rust types such as Option. This will allow you to simplify your code quite a bit — only one Mutex and one Condvar:
use std::thread;
use std::sync::{Arc, Condvar, Mutex};
/// One-slot blocking buffer: `None` means empty, `Some` means occupied.
/// A single mutex guards the slot; one condvar serves both directions.
#[derive(Debug, Default)]
struct Buffer {
    data: Mutex<Option<i32>>,
    data_cv: Condvar,
}

impl Buffer {
    /// Blocks until the slot is free, then stores `val` and wakes a waiter.
    fn insert(&self, val: i32) {
        let mut slot = self.data.lock().expect("Can't lock");
        // Re-check the predicate after every wakeup (spurious wakeups happen).
        while slot.is_some() {
            slot = self.data_cv.wait(slot).expect("Can't wait");
        }
        slot.replace(val);
        self.data_cv.notify_one();
    }

    /// Blocks until a value is present, then takes it out and wakes a waiter.
    fn remove(&self) -> i32 {
        let mut slot = self.data.lock().expect("Can't lock");
        while slot.is_none() {
            slot = self.data_cv.wait(slot).expect("Can't wait");
        }
        // The loop above guarantees the slot is occupied here.
        let val = slot.take().unwrap();
        self.data_cv.notify_one();
        val
    }
}
/// Pushes 0..50 into the buffer, logging each value before inserting it
/// (insert blocks until the consumer has made room).
fn producer(buffer: &Buffer) {
    for value in 0..50 {
        println!("p: {}", value);
        buffer.insert(value);
    }
}
/// Pops exactly 50 values, logging each one (remove blocks until a value
/// is available).
fn consumer(buffer: &Buffer) {
    for _ in 0..50 {
        println!("c: {}", buffer.remove());
    }
}
/// Wires one producer and one consumer to a shared buffer and waits for both.
fn main() {
    let buffer = Arc::new(Buffer::default());

    let producer_buf = Arc::clone(&buffer);
    let p = thread::spawn(move || producer(&producer_buf));

    let consumer_buf = Arc::clone(&buffer);
    let c = thread::spawn(move || consumer(&consumer_buf));

    c.join().expect("Consumer had an error");
    p.join().expect("Producer had an error");
}
If you wanted to have a bit more performance (benchmark to see if it's worth it), you could have Condvars for the "empty" and "full" conditions separately:
/// Variant with dedicated condvars for the "became empty" and "became full"
/// transitions, so producers and consumers never wake each other needlessly.
#[derive(Debug, Default)]
struct Buffer {
    data: Mutex<Option<i32>>,
    is_empty: Condvar,
    is_full: Condvar,
}

impl Buffer {
    /// Waits for the slot to empty, stores `val`, then signals "full".
    fn insert(&self, val: i32) {
        let mut guard = self.data.lock().expect("Can't lock");
        while guard.is_some() {
            guard = self.is_empty.wait(guard).expect("Can't wait");
        }
        *guard = Some(val);
        self.is_full.notify_one();
    }

    /// Waits for the slot to fill, takes the value, then signals "empty".
    fn remove(&self) -> i32 {
        let mut guard = self.data.lock().expect("Can't lock");
        while guard.is_none() {
            guard = self.is_full.wait(guard).expect("Can't wait");
        }
        let val = guard.take().unwrap();
        self.is_empty.notify_one();
        val
    }
}
To improve the concurrency performance, you can add more slots in the buffer. The following example also supports multiple producers & consumers.
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
use std::thread;
/// Capacity of the ring; `count` tracks occupancy so full/empty are distinct.
const MAX: usize = 10;

/// Bounded ring buffer: one mutex guards the ring state, two condvars signal
/// "an item was added" (fill_cond) and "a slot was freed" (empty_cond).
/// Supports multiple producers and consumers.
struct Buffer {
    inner: Mutex<BufferInner>,
    fill_cond: Condvar,
    empty_cond: Condvar,
}

impl Buffer {
    /// Creates an empty ring with all slots unoccupied.
    fn new() -> Self {
        let inner = BufferInner {
            data: [None; MAX],
            filled: 0,
            used: 0,
            count: 0,
        };
        Buffer {
            inner: Mutex::new(inner),
            fill_cond: Condvar::new(),
            empty_cond: Condvar::new(),
        }
    }
}

/// The ring state proper; only ever touched while the mutex is held.
struct BufferInner {
    data: [Option<i32>; MAX],
    filled: usize, // next write index
    used: usize,   // next read index
    count: usize,  // number of occupied slots
}

impl BufferInner {
    /// Writes `value` at the fill cursor and advances it (caller checks room).
    fn put(&mut self, value: i32) {
        self.data[self.filled] = Some(value);
        self.filled = (self.filled + 1) % MAX;
        self.count += 1;
    }

    /// Reads the value at the use cursor and advances it (caller checks count).
    fn get(&mut self) -> i32 {
        let slot = self.data[self.used];
        self.used = (self.used + 1) % MAX;
        self.count -= 1;
        slot.unwrap()
    }
}

/// Produces 0..20, blocking while the ring is full.
fn producer(buffer: &Buffer) {
    for i in 0..20 {
        let mut state = buffer.inner.lock().unwrap();
        while state.count == MAX {
            state = buffer.empty_cond.wait(state).unwrap();
        }
        state.put(i);
        println!("producer: {}", i);
        buffer.fill_cond.notify_one();
    }
}

/// Consumes 20 values, blocking while the ring is empty.
fn consumer(buffer: &Buffer) {
    for _ in 0..20 {
        let mut state = buffer.inner.lock().unwrap();
        while state.count == 0_usize {
            state = buffer.fill_cond.wait(state).unwrap();
        }
        let value = state.get();
        println!("consumer: {}", value);
        buffer.empty_cond.notify_one();
    }
}

/// Runs one producer and one consumer against a shared ring.
fn main() {
    let shared = Arc::new(Buffer::new());
    let for_consumer = Arc::clone(&shared);
    let p1 = thread::spawn(move || producer(&shared));
    let c1 = thread::spawn(move || consumer(&for_consumer));
    p1.join().unwrap();
    c1.join().unwrap();
}

Using socketpair() under Rust

How can you call Linux' socketpair() command in rust?
I was not able to find it in the documentation.
This is how it works:
use std::io;
use std::libc;
use std::libc::consts::os::bsd44;
use std::libc::funcs::bsd43;
// NOTE(review): this answer is written in pre-1.0 Rust (std::libc, `uint`,
// `[i32, ..2]`, bare `*` raw-pointer types, io::IoError) and will not compile
// on any modern toolchain. Today you would use the `libc` crate for a raw
// socketpair(2), or simply std::os::unix::net::UnixStream::pair().
extern {
fn socketpair(
domain: libc::c_int,
typ: libc::c_int,
protocol: libc::c_int,
// NOTE(review): socketpair writes two fds through this pointer, so it
// should be a mutable pointer to an array of 2 c_int.
sv: *libc::c_int
) -> libc::c_int;
}
// One endpoint of the pair, identified by its raw fd.
struct PairedStream {
socket: i32
}
impl PairedStream {
fn new(fd: i32) -> PairedStream {
PairedStream {socket: fd}
}
// Sends the whole buffer; reports a generic error on a short/failed send.
fn send(&self, buf: &[u8]) -> Result<(), io::IoError> {
let res = unsafe {
let ptr = buf.as_ptr() as *mut libc::c_void;
let len = buf.len() as u64;
bsd43::send(self.socket, ptr, len, 0) as uint == buf.len()
};
if res {
return Ok(());
}
else {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
}
// Receives into buf, returning the number of bytes read.
fn read(&self, buf: &mut [u8]) -> Result<uint, io::IoError> {
let len = unsafe {
let ptr = buf.as_ptr() as *mut libc::c_void;
let len = buf.len() as u64;
bsd43::recv(self.socket, ptr, len, 0)
};
if len == -1 {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
else {
return Ok(len as uint);
}
}
}
struct SocketPair;
impl SocketPair {
// Creates a connected AF_LOCAL datagram pair.
// NOTE(review): AF_LOCAL is hard-coded to 1 (true on Linux) rather than
// taken from a libc constant.
fn new() -> (Result<(PairedStream, PairedStream), io::IoError>) {
let AF_LOCAL = 1;
let sv: [i32, ..2] = [-1, -1];
let _type = bsd44::SOCK_DGRAM;
let res = unsafe {
socketpair(AF_LOCAL, _type, 0, sv.as_ptr()) == 0
};
if res {
let s1 = PairedStream::new(sv[0]);
let s2 = PairedStream::new(sv[1]);
return Ok((s1, s2));
}
else {
return Err(io::IoError {
kind: io::OtherIoError,
desc: "TODO: determine error types ;)",
detail: None,
})
}
}
}
// Demo: send 9 bytes one way and read them back on the peer.
fn main() {
let sockets = SocketPair::new();
match sockets {
Ok((s1, s2)) => {
let mut buf = [9,8,7,6,5,4,3,2,1];
s1.send([1,2,3,4,5,6,7,8,9]);
s2.read(buf);
println!("{} {}", buf[0], buf[8])
}
Err(ioerr) => {}
}
}

Resources