In the __enter__ method I want to return an object which is accessible in Rust and Python, so that Rust is able to update values in the object and Python can read the updated values.
I would like to have something like this:
#![feature(specialization)]
use std::thread;
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny, PyDict};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
use pyo3::wrap_pyfunction;
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
fn counter(
root_path: &str,
statistics: &mut Statistics,
) {
statistics.files += 1;
statistics.errors.push(String::from("Foo"));
}
#[pyfunction]
pub fn count(
py: Python,
root_path: &str,
) -> PyResult<PyObject> {
let mut statistics = Statistics {
files: 0,
errors: Vec::new(),
};
let rc: std::result::Result<(), std::io::Error> = py.allow_threads(|| {
counter(root_path, &mut statistics);
Ok(())
});
let pyresult = PyDict::new(py);
match rc {
Err(e) => { pyresult.set_item("error", e.to_string()).unwrap();
return Ok(pyresult.into())
},
_ => ()
}
pyresult.set_item("files", statistics.files).unwrap();
pyresult.set_item("errors", statistics.errors).unwrap();
Ok(pyresult.into())
}
#[pyclass]
#[derive(Debug)]
pub struct Count {
root_path: String,
exit_called: bool,
thr: Option<thread::JoinHandle<()>>,
statistics: Statistics,
}
#[pymethods]
impl Count {
#[new]
fn __new__(
obj: &PyRawObject,
root_path: &str,
) {
obj.init(Count {
root_path: String::from(root_path),
exit_called: false,
thr: None,
statistics: Statistics {
files: 0,
errors: Vec::new(),
},
});
}
#[getter]
fn statistics(&self) -> PyResult<Statistics> {
Ok(Statistics { files: self.statistics.files,
errors: self.statistics.errors.to_vec(), })
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&mut self) -> PyResult<Py<Count>> {
let gil = GILGuard::acquire();
self.thr = Some(thread::spawn(|| {
counter(self.root_path.as_ref(), &mut self.statistics)
}));
Ok(PyRefMut::new(gil.python(), *self).unwrap().into())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
self.thr.unwrap().join();
let gil = GILGuard::acquire();
self.exit_called = true;
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
m.add_wrapped(wrap_pyfunction!(count))?;
Ok(())
}
But I'm getting the following error:
error[E0477]: the type `[closure#src/lib.rs:90:39: 92:10 self:&mut &'p mut Count]` does not fulfill the required lifetime
--> src/lib.rs:90:25
|
90 | self.thr = Some(thread::spawn(|| {
| ^^^^^^^^^^^^^
|
= note: type must satisfy the static lifetime
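For context: thread::spawn only accepts 'static closures, so the closure cannot capture a borrow of self (or of anything else on the stack). The sketch below is a minimal, standalone illustration of the pattern that does satisfy that requirement, namely moving an owned Arc<Mutex<_>> clone into the thread; it is illustrative only, not the pyo3 code itself.
use std::sync::{Arc, Mutex};
use std::thread;

struct Stats {
    files: u32,
}

fn main() {
    let stats = Arc::new(Mutex::new(Stats { files: 0 }));

    // `thread::spawn(|| { ...&mut stats... })` fails: the closure must be
    // `'static`, so it cannot hold a borrow of a local. Cloning the Arc and
    // moving the clone in gives the thread its own owned handle instead.
    let shared = Arc::clone(&stats);
    let handle = thread::spawn(move || {
        shared.lock().unwrap().files += 1;
    });

    handle.join().unwrap();
    assert_eq!(stats.lock().unwrap().files, 1);
}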
I've found a solution. Using a guarded reference (an Arc<Mutex<Statistics>> that the worker thread owns a clone of) does the trick:
#![feature(specialization)]
use std::{thread, time};
use std::sync::{Arc, Mutex};
extern crate crossbeam_channel as channel;
use channel::{Sender, Receiver, TryRecvError};
use pyo3::prelude::*;
use pyo3::types::{PyType, PyAny};
use pyo3::exceptions::ValueError;
use pyo3::PyContextProtocol;
#[pyclass]
#[derive(Debug, Clone)]
pub struct Statistics {
pub files: u32,
pub errors: Vec<String>,
}
pub fn counter(
statistics: Arc<Mutex<Statistics>>,
cancel: &Receiver<()>,
) {
for _ in 1..15 {
thread::sleep(time::Duration::from_millis(100));
{
let mut s = statistics.lock().unwrap();
s.files += 1;
}
match cancel.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
println!("Terminating.");
break;
}
Err(TryRecvError::Empty) => {}
}
}
{
let mut s = statistics.lock().unwrap();
s.errors.push(String::from("Foo"));
}
}
#[pyclass]
#[derive(Debug)]
pub struct Count {
exit_called: bool,
statistics: Arc<Mutex<Statistics>>,
thr: Option<thread::JoinHandle<()>>,
cancel: Option<Sender<()>>,
}
#[pymethods]
impl Count {
#[new]
fn __new__(obj: &PyRawObject) {
obj.init(Count {
exit_called: false,
statistics: Arc::new(Mutex::new(Statistics {
files: 0,
errors: Vec::new(),
})),
thr: None,
cancel: None,
});
}
#[getter]
fn statistics(&self) -> PyResult<u32> {
let s = Arc::clone(&self.statistics).lock().unwrap().files;
Ok(s)
}
}
#[pyproto]
impl<'p> PyContextProtocol<'p> for Count {
fn __enter__(&'p mut self) -> PyResult<()> {
let statistics = self.statistics.clone();
let (sender, receiver) = channel::bounded(1);
self.cancel = Some(sender);
self.thr = Some(thread::spawn(move || {
counter(statistics, &receiver)
}));
Ok(())
}
fn __exit__(
&mut self,
ty: Option<&'p PyType>,
_value: Option<&'p PyAny>,
_traceback: Option<&'p PyAny>,
) -> PyResult<bool> {
let _ = self.cancel.as_ref().unwrap().send(());
self.thr.take().map(thread::JoinHandle::join);
let gil = GILGuard::acquire();
self.exit_called = true;
if ty == Some(gil.python().get_type::<ValueError>()) {
Ok(true)
} else {
Ok(false)
}
}
}
#[pyproto]
impl pyo3::class::PyObjectProtocol for Count {
fn __str__(&self) -> PyResult<String> {
Ok(format!("{:?}", self))
}
}
#[pymodule(count)]
fn init(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<Count>()?;
Ok(())
}
Now I can run the following code:
import time
import count
c = count.Count()
with c:
    for _ in range(5):
        print(c.statistics)
        time.sleep(0.1)
As the example shows, thread cancellation also works, although a perhaps nicer solution would be to use the thread_control crate.
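For completeness, cancellation does not strictly need a channel or an extra crate; a shared atomic flag from the standard library is enough. The following is a minimal, standalone sketch of that approach (plain std, not the thread_control crate itself):
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    // Shared flag the worker polls; flipping it asks the thread to stop.
    let stop = Arc::new(AtomicBool::new(false));
    let stop_flag = Arc::clone(&stop);

    let worker = thread::spawn(move || {
        while !stop_flag.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(100));
        }
        println!("Terminating.");
    });

    thread::sleep(Duration::from_millis(300));
    stop.store(true, Ordering::Relaxed); // request cancellation
    worker.join().unwrap();
}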
I don't know what to do next. It seems I'm misunderstanding something, or perhaps I haven't yet learned some critical topic.
use std::sync::Arc;
use reqwest::{Error, Response}; // 0.11.4
use tokio::sync::mpsc::{self, Receiver, Sender}; // 1.9.0
pub struct Task {
pub id: u32,
pub url: String,
}
pub enum Message {
Failure(Task, Error),
Success(Task, Response),
}
struct State {
client: reqwest::Client,
res_tx: Sender<Message>,
res_rx: Receiver<Message>,
}
pub struct Proxy {
state: Arc<State>,
max_rps: u16,
max_pending: u16,
id: u32,
parent_tx: Sender<String>,
}
async fn send_msg<T>(tx: &Sender<T>, msg: T) {
match tx.send(msg).await {
Err(error) => {
eprintln!("{}", error)
}
_ => (),
};
}
impl Proxy {
// Starts loop for input channel
async fn start_chin(&mut self) -> Sender<Task> {
let (chin_tx, mut chin_rx) = mpsc::channel::<Task>(self.max_pending as usize + 1 as usize);
let state_outer = self.state.clone();
tokio::spawn(async move {
loop {
match chin_rx.recv().await {
Some(task) => {
let res_tx = state_outer.res_tx.clone();
let state = state_outer.clone();
tokio::spawn(async move {
match state.client.get(&task.url).send().await {
Ok(res) => send_msg(&res_tx, Message::Success(task, res)).await,
Err(err) => send_msg(&res_tx, Message::Failure(task, err)).await,
}
});
}
None => (),
}
}
});
chin_tx
}
async fn start_chres(&self) {
let state = self.state.clone();
tokio::spawn(async move {
loop {
match state.res_rx.recv().await { // LINE PRODUCES ERROR
Some(task) => {}
None => (),
}
}
});
}
}
impl Proxy {
pub fn new(
id: u32,
parent_tx: Sender<String>,
proxy_addr: &str,
max_rps: u16,
max_pending: u16,
) -> Result<Self, Error> {
let mut client = reqwest::Client::builder();
if proxy_addr != "none" {
client = client.proxy(reqwest::Proxy::all(proxy_addr)?)
}
let (res_tx, res_rx) = mpsc::channel::<Message>(max_pending as usize + 1 as usize); // TODO: check size
Ok(Proxy {
id,
state: Arc::new(State {
client: client.build()?,
res_tx,
res_rx,
}),
max_rps,
max_pending,
parent_tx,
})
}
}
error[E0596]: cannot borrow data in an `Arc` as mutable
--> src/lib.rs:69:23
|
69 | match state.res_rx.recv().await {
| ^^^^^^^^^^^^ cannot borrow as mutable
|
= help: trait `DerefMut` is required to modify through a dereference, but it is not implemented for `Arc<State>`
use std::sync::Arc;
struct Something {
size: usize
}
impl Something {
fn increase(&mut self) {
self.size = self.size + 1;
}
}
fn main() {
let something = Something{size: 1};
let arc = Arc::new(something);
arc.increase();
}
gives
error[E0596]: cannot borrow data in an `Arc` as mutable
--> src/main.rs:16:5
|
16 | arc.increase();
| ^^^ cannot borrow as mutable
|
= help: trait `DerefMut` is required to modify through a dereference, but it is not implemented for `Arc<Something>`
error: aborting due to previous error; 1 warning emitted
because it tries to borrow arc as mutable. For that to happen, DerefMut would have to be implemented for Arc, but it isn't, because Arc is not meant to provide mutable access to its contents.
Wrapping your object in a Mutex works:
use std::sync::{Arc, Mutex};
struct Something {
size: usize
}
impl Something {
fn increase(&mut self) {
self.size = self.size + 1;
}
}
fn main() {
let something = Something{size: 1};
let arc = Arc::new(Mutex::new(something));
arc.lock().unwrap().increase();
}
Now it can be shared and can be increased.
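To make the "can be shared" part concrete, here is a small sketch (same Something type, plain std, the increment inlined) in which several threads mutate the value through the Arc<Mutex<...>>:
use std::sync::{Arc, Mutex};
use std::thread;

struct Something {
    size: usize,
}

fn main() {
    let arc = Arc::new(Mutex::new(Something { size: 1 }));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let arc = Arc::clone(&arc); // each thread gets its own Arc handle
            thread::spawn(move || {
                arc.lock().unwrap().size += 1; // the Mutex serializes the writes
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(arc.lock().unwrap().size, 5);
}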
Lucas Zanella's answer and Shepmaster's comments helped a lot to refactor and simplify the code. I've decided to pass ownership inside the Proxy::new() function instead of using a shared reference. The code became more readable, and I've avoided a shared reference to the mutable tokio::sync::mpsc::Receiver. Perhaps the question turned out to be too unstructured, but I came to a new approach thanks to the community. The refactored code is listed below.
use reqwest::{Client, Error, Response};
use tokio::sync::mpsc;
use tokio::sync::mpsc::{Sender, Receiver};
pub struct Task {
pub id: u32,
pub url: String,
}
pub enum Message{
Failure(Task, Error),
Success(Task, Response),
}
pub struct Proxy{
id: u32,
max_rps: u16,
max_pending: u16,
in_tx: Sender<Task>,
}
async fn send_msg<T>(tx: &Sender<T>, msg: T){
match tx.send(msg).await {
Err(error) => { eprintln!("{}", error) },
_ => (),
};
}
async fn start_loop_in(client: Client, mut in_rx: Receiver<Task>, res_tx: Sender<Message>){
loop {
if let Some(task) = in_rx.recv().await {
let client_clone = client.clone();
let res_tx_clone = res_tx.clone();
tokio::spawn(async move {
println!("SENDING: {}", &task.url); // TODO: DELETE DEBUG
match client_clone.get(&task.url).send().await {
Ok(res) => send_msg(&res_tx_clone, Message::Success(task, res)).await,
Err(err) => send_msg(&res_tx_clone, Message::Failure(task, err)).await,
}
});
}
}
}
async fn start_loop_res(mut res_rx: Receiver<Message>, out_tx: Sender<String>){
loop {
if let Some(message) = res_rx.recv().await {
match message {
Message::Success(task, res) => {
send_msg(
&out_tx,
format!("{:#?}", res.text().await.unwrap()) // TODO: change in release!
).await;
},
Message::Failure(task, err) => {
send_msg(&out_tx, err.to_string()).await;
},
}
}
}
}
impl Proxy{
pub fn new(id: u32, parent_tx: Sender<String>, proxy_addr: &str, max_rps: u16, max_pending: u16) -> Result<Self, Error> {
let mut client = Client::builder();
if proxy_addr != "none" { client = client.proxy(reqwest::Proxy::all(proxy_addr)?) }
let (res_tx, res_rx) = mpsc::channel::<Message>(max_pending as usize + 1 as usize); // TODO: check size
let client = client.build()?;
let (in_tx, in_rx) = mpsc::channel::<Task>(max_pending as usize + 1 as usize);
let res_tx_clone = res_tx.clone();
tokio::spawn(async move { start_loop_in(client, in_rx, res_tx_clone).await });
tokio::spawn(async move { start_loop_res(res_rx, parent_tx).await });
Ok(Proxy{
id,
max_rps,
max_pending,
in_tx,
})
}
pub fn get_in_tx(&self) -> Sender<Task> {
self.in_tx.clone()
}
}
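A hypothetical driver for the refactored Proxy might look like the sketch below; the URL and channel capacity are made up for illustration, and it assumes a tokio runtime (Proxy::new calls tokio::spawn internally):
#[tokio::main]
async fn main() -> Result<(), Error> {
    // Channel on which the proxy reports results back to the caller.
    let (parent_tx, mut parent_rx) = mpsc::channel::<String>(16);

    let proxy = Proxy::new(1, parent_tx, "none", 10, 10)?;

    // Hand a task to the input loop and wait for the formatted response.
    let in_tx = proxy.get_in_tx();
    send_msg(&in_tx, Task { id: 1, url: "https://example.com".into() }).await;

    if let Some(reply) = parent_rx.recv().await {
        println!("{}", reply);
    }
    Ok(())
}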
I'm currently trying to wrap a C library in Rust that has a few requirements. The C library can only be run on a single thread, and can only be initialized / cleaned up once on the same thread. I want something like the following.
extern "C" {
fn init_lib() -> *mut c_void;
fn cleanup_lib(ctx: *mut c_void);
}
// This line doesn't work.
static mut CTX: Option<(ThreadId, Rc<Context>)> = None;
struct Context(*mut c_void);
impl Context {
fn acquire() -> Result<Rc<Context>, Error> {
// If CTX has a reference on the current thread, clone and return it.
// Otherwise initialize the library and set CTX.
}
}
impl Drop for Context {
fn drop(&mut self) {
unsafe { cleanup_lib(self.0); }
}
}
Does anyone have a good way to achieve something like this? Every solution I try to come up with involves creating a Mutex / Arc and making the Context type Send and Sync, which I don't want, as I want it to remain single-threaded.
A working solution I came up with was to just implement the reference counting myself, removing the need for Rc entirely.
#![feature(once_cell)]
use std::{error::Error, ffi::c_void, fmt, lazy::SyncLazy, sync::Mutex, thread::ThreadId};
extern "C" {
fn init_lib() -> *mut c_void;
fn cleanup_lib(ctx: *mut c_void);
}
#[derive(Debug)]
pub enum ContextError {
InitOnOtherThread,
}
impl fmt::Display for ContextError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ContextError::InitOnOtherThread => {
write!(f, "Context already initialized on a different thread")
}
}
}
}
impl Error for ContextError {}
struct StaticPtr(*mut c_void);
unsafe impl Send for StaticPtr {}
static CTX: SyncLazy<Mutex<Option<(ThreadId, usize, StaticPtr)>>> =
SyncLazy::new(|| Mutex::new(None));
pub struct Context(*mut c_void);
impl Context {
pub fn acquire() -> Result<Context, ContextError> {
let mut ctx = CTX.lock().unwrap();
if let Some((id, ref_count, ptr)) = ctx.as_mut() {
if *id == std::thread::current().id() {
*ref_count += 1;
return Ok(Context(ptr.0));
}
Err(ContextError::InitOnOtherThread)
} else {
let ptr = unsafe { init_lib() };
*ctx = Some((std::thread::current().id(), 1, StaticPtr(ptr)));
Ok(Context(ptr))
}
}
}
impl Drop for Context {
fn drop(&mut self) {
let mut ctx = CTX.lock().unwrap();
let (_, ref_count, ptr) = ctx.as_mut().unwrap();
*ref_count -= 1;
if *ref_count == 0 {
unsafe {
cleanup_lib(ptr.0);
}
*ctx = None;
}
}
}
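Illustrative only (it assumes the extern "C" functions above actually link against the real library, so it is a sketch rather than a runnable test): acquiring twice on the same thread just bumps the count, and cleanup runs when the last Context is dropped.
fn use_library() -> Result<(), ContextError> {
    let first = Context::acquire()?;  // first acquire on this thread runs init_lib
    let second = Context::acquire()?; // same thread: only the ref count increases
    drop(second);
    drop(first);                      // count reaches zero, so cleanup_lib runs
    Ok(())
}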
I think the most 'rustic' way to do this is with std::sync::mpsc::sync_channel and an enum describing library operations.
The only public-facing elements of this module are launch_lib(), the SafeLibRef struct (but not its internals), and the pub fn that are part of the impl SafeLibRef.
Also, this example strongly represents the philosophy that the best way to deal with global state is to not have any.
I have played fast and loose with the Result::unwrap() calls. It would be more responsible to handle error conditions better.
use std::sync::{ atomic::{ AtomicBool, Ordering }, mpsc::{ SyncSender, Receiver, sync_channel } };
use std::ffi::c_void;
extern "C" {
fn init_lib() -> *mut c_void;
fn do_op_1(ctx: *mut c_void, a: u16, b: u32, c: u64) -> f64;
fn do_op_2(ctx: *mut c_void, a: f64) -> bool;
fn cleanup_lib(ctx: *mut c_void);
}
enum LibOperation {
Op1(u16,u32,u64,SyncSender<f64>),
Op2(f64, SyncSender<bool>),
Terminate(SyncSender<()>),
}
#[derive(Clone)]
pub struct SafeLibRef(SyncSender<LibOperation>);
fn lib_thread(rx: Receiver<LibOperation>) {
static LIB_INITIALIZED: AtomicBool = AtomicBool::new(false);
if LIB_INITIALIZED.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {
panic!("Tried to double-initialize library!");
}
let libptr = unsafe { init_lib() };
loop {
let op = rx.recv();
if op.is_err() {
unsafe { cleanup_lib(libptr) };
break;
}
match op.unwrap() {
LibOperation::Op1(a,b,c,tx_res) => {
let res: f64 = unsafe { do_op_1(libptr, a, b, c) };
tx_res.send(res).unwrap();
},
LibOperation::Op2(a, tx_res) => {
let res: bool = unsafe { do_op_2(libptr, a) };
tx_res.send(res).unwrap();
}
LibOperation::Terminate(tx_res) => {
unsafe { cleanup_lib(libptr) };
tx_res.send(()).unwrap();
break;
}
}
}
}
/// This needs to be called no more than once.
/// The resulting SafeLibRef can be cloned and passed around.
pub fn launch_lib() -> SafeLibRef {
let (tx,rx) = sync_channel(0);
std::thread::spawn(|| lib_thread(rx));
SafeLibRef(tx)
}
// This is the interface that most of your code will use
impl SafeLibRef {
pub fn op_1(&self, a: u16, b: u32, c: u64) -> f64 {
let (res_tx, res_rx) = sync_channel(1);
self.0.send(LibOperation::Op1(a, b, c, res_tx)).unwrap();
res_rx.recv().unwrap()
}
pub fn op_2(&self, a: f64) -> bool {
let (res_tx, res_rx) = sync_channel(1);
self.0.send(LibOperation::Op2(a, res_tx)).unwrap();
res_rx.recv().unwrap()
}
pub fn terminate(&self) {
let (res_tx, res_rx) = sync_channel(1);
self.0.send(LibOperation::Terminate(res_tx)).unwrap();
res_rx.recv().unwrap();
}
}
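A sketch of how calling code might use this (again assuming the extern "C" functions actually link); every clone of SafeLibRef funnels its requests to the single library thread:
fn main() {
    let lib = launch_lib();      // spawn the dedicated library thread (once)
    let lib2 = lib.clone();      // clones can be moved to other threads

    let handle = std::thread::spawn(move || lib2.op_2(1.5));

    let x = lib.op_1(1, 2, 3);   // blocks until the library thread replies
    let b = handle.join().unwrap();
    println!("op_1 = {}, op_2 = {}", x, b);

    lib.terminate();             // ask the library thread to clean up and exit
}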
I'm trying to use panic::catch_unwind to catch some errors, but it doesn't seem to work. Here is an example:
rust:
use std::sync::Mutex;
use wasm_bindgen::prelude::*;
use std::sync::PoisonError;
use std::panic;
use lazy_static::lazy_static; // needed for the lazy_static! macro used below
pub struct CurrentStatus {
pub index: i32,
}
#[wasm_bindgen]
extern {
pub fn alert(s: &str);
}
impl CurrentStatus {
fn new() -> Self {
CurrentStatus {
index: 1,
}
}
fn get_index(&mut self) -> i32 {
self.index += 1;
self.index.clone()
}
fn add_index(&mut self) {
self.index += 2;
}
}
lazy_static! {
pub static ref FOO: Mutex<CurrentStatus> = Mutex::new(CurrentStatus::new());
}
unsafe impl Send for CurrentStatus {}
#[wasm_bindgen]
pub fn add_index() {
FOO.lock().unwrap_or_else(PoisonError::into_inner).add_index();
}
#[wasm_bindgen]
pub fn get_index() -> i32 {
let mut foo = FOO.lock().unwrap_or_else(PoisonError::into_inner);
let result = panic::catch_unwind(|| {
panic!("error happen!"); // original panic! code
});
if result.is_err() {
alert("panic!!!!!panic");
}
return foo.get_index();
}
js:
const js = import("../pkg/hello_wasm.js");
js.then(js => {
window.js = js;
console.log(js.get_index());
js.add_index();
});
I thought it would catch the panic so that I could still call add_index afterwards, but in fact I can call neither function after the panic occurs.
I want to catch a panic from one function so that when users call the other functions, everything still works.
Thanks very much
How can I construct a PlasmaContainsRequest object? I don't know how to get a WIPOffset to construct a PlasmaContainsRequestArgs object.
I used flatc 1.10.0 to generate this Rust code:
impl<'a> PlasmaContainsRequest<'a> {
#[inline]
pub fn init_from_table(table: flatbuffers::Table<'a>) -> Self {
PlasmaContainsRequest { _tab: table }
}
#[allow(unused_mut)]
pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>(
_fbb: &'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,
args: &'args PlasmaContainsRequestArgs<'args>,
) -> flatbuffers::WIPOffset<PlasmaContainsRequest<'bldr>> {
let mut builder = PlasmaContainsRequestBuilder::new(_fbb);
if let Some(x) = args.object_id {
builder.add_object_id(x);
}
builder.finish()
}
pub const VT_OBJECT_ID: flatbuffers::VOffsetT = 4;
#[inline]
pub fn object_id(&self) -> Option<&'a str> {
self._tab
.get::<flatbuffers::ForwardsUOffset<&str>>(PlasmaContainsRequest::VT_OBJECT_ID, None)
}
}
pub struct PlasmaContainsRequestArgs<'a> {
pub object_id: Option<flatbuffers::WIPOffset<&'a str>>,
}
impl<'a> Default for PlasmaContainsRequestArgs<'a> {
#[inline]
fn default() -> Self {
PlasmaContainsRequestArgs { object_id: None }
}
}
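For what it's worth, the usual way to obtain that WIPOffset is FlatBufferBuilder::create_string from the flatbuffers crate; the sketch below is based on the generated code above and should be treated as an assumption about the exact builder calls rather than a verified answer:
use flatbuffers::FlatBufferBuilder;

fn build_contains_request(object_id: &str) -> Vec<u8> {
    let mut fbb = FlatBufferBuilder::new();

    // create_string copies the &str into the buffer and returns the
    // WIPOffset<&str> that PlasmaContainsRequestArgs::object_id expects.
    let object_id = fbb.create_string(object_id);

    let req = PlasmaContainsRequest::create(
        &mut fbb,
        &PlasmaContainsRequestArgs {
            object_id: Some(object_id),
        },
    );

    fbb.finish(req, None);
    fbb.finished_data().to_vec()
}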
I am trying to access a variable inside a for loop. I can't implement Copy on the struct because it contains a String. How would I use the variable across iterations?
I get error E0382 when compiling. When I looked at the Rust documentation for the error, they mentioned using reference counting to solve the problem. Is this the only solution in my case?
#[derive(Clone)]
struct InputParser {
args: Vec<String>,
current: String,
consumed_quote: bool,
}
impl InputParser {
pub fn parse(input: String) -> Vec<String> {
let parser = InputParser {
args: Vec::new(),
current: String::new(),
consumed_quote: false,
};
for c in input.chars() {
match c {
'"' => parser.consume_quote(),
' ' => parser.consume_space(),
_ => parser.consume_char(c),
}
}
parser.end();
return parser.args;
}
pub fn consume_space(mut self) {
if !self.consumed_quote {
self.push_current();
}
}
pub fn consume_quote(mut self) {
self.consumed_quote = self.consumed_quote;
if self.consumed_quote {
self.push_current();
}
}
pub fn consume_char(mut self, c: char) {
self.current.push(c);
}
pub fn end(mut self) {
self.push_current();
}
pub fn push_current(mut self) {
if self.current.len() > 0 {
self.args.push(self.current);
self.current = String::new();
}
}
}
I want to access parser across iterations of the for loop.
[How do I] move [a] non-copyable struct across iterations
You don't, at least not trivially. Once you've moved the struct to a function, it's gone. The only way to get it back is for the function to give it back to you.
Instead, you most likely want to modify an existing struct inside the loop. You need to use a mutable reference for this:
use std::mem;
#[derive(Clone)]
struct InputParser {
args: Vec<String>,
current: String,
consumed_quote: bool,
}
impl InputParser {
fn consume_space(&mut self) {
if !self.consumed_quote {
self.push_current();
}
}
fn consume_quote(&mut self) {
self.consumed_quote = self.consumed_quote;
if self.consumed_quote {
self.push_current();
}
}
fn consume_char(&mut self, c: char) {
self.current.push(c);
}
fn end(&mut self) {
self.push_current();
}
fn push_current(&mut self) {
if self.current.len() > 0 {
let arg = mem::replace(&mut self.current, String::new());
self.args.push(arg);
}
}
}
fn parse(input: String) -> Vec<String> {
let mut parser = InputParser {
args: Vec::new(),
current: String::new(),
consumed_quote: false,
};
for c in input.chars() {
match c {
'"' => parser.consume_quote(),
' ' => parser.consume_space(),
_ => parser.consume_char(c),
}
}
parser.end();
parser.args
}
fn main() {}
Note that the previous way of taking the current argument would result in error[E0507]: cannot move out of borrowed content, so I switched to mem::replace. This prevents self.current from ever becoming an undefined value (which it was previously).
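A tiny standalone illustration of what mem::replace does here:
use std::mem;

fn main() {
    let mut current = String::from("arg");

    // The old String is moved out and a fresh empty one is left in its place,
    // so the value behind the &mut reference is never in an undefined state.
    let taken = mem::replace(&mut current, String::new());

    assert_eq!(taken, "arg");
    assert_eq!(current, "");
}
On current Rust, std::mem::take(&mut self.current) is a shorthand for the same thing when the type implements Default.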
If you really want to pass everything by value, you need to return by value as well.
#[derive(Clone)]
struct InputParser {
args: Vec<String>,
current: String,
consumed_quote: bool,
}
impl InputParser {
fn consume_space(mut self) -> Self {
if !self.consumed_quote {
return self.push_current();
}
self
}
fn consume_quote(mut self) -> Self {
self.consumed_quote = self.consumed_quote;
if self.consumed_quote {
return self.push_current();
}
self
}
fn consume_char(mut self, c: char) -> Self {
self.current.push(c);
self
}
fn end(mut self) -> Self {
self.push_current()
}
fn push_current(mut self) -> Self {
if self.current.len() > 0 {
self.args.push(self.current);
self.current = String::new();
}
self
}
}
fn parse(input: String) -> Vec<String> {
let mut parser = InputParser {
args: Vec::new(),
current: String::new(),
consumed_quote: false,
};
for c in input.chars() {
parser = match c {
'"' => parser.consume_quote(),
' ' => parser.consume_space(),
_ => parser.consume_char(c),
}
}
parser = parser.end();
parser.args
}
fn main() {}
I believe this makes the API objectively worse in this case. However, you will see this style somewhat frequently with a builder. In that case, the methods tend to be chained together, so you never see a reassignment to the variable.
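As a side note, here is a small, made-up builder sketch (the names are hypothetical) showing why the by-value style reads well when the calls are chained:
struct RequestBuilder {
    url: String,
    retries: u32,
}

impl RequestBuilder {
    fn new(url: &str) -> Self {
        RequestBuilder { url: url.to_string(), retries: 0 }
    }

    // Each method consumes self and returns it, so calls chain naturally.
    fn retries(mut self, n: u32) -> Self {
        self.retries = n;
        self
    }

    fn build(self) -> String {
        format!("{} (retries: {})", self.url, self.retries)
    }
}

fn main() {
    // No intermediate variable is ever reassigned; the value simply flows
    // from one call to the next.
    let request = RequestBuilder::new("https://example.com")
        .retries(3)
        .build();
    println!("{}", request);
}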