My test fails at "attempt to subtract with overflow" - rust

use itertools::Itertools;
/// A race result stored as a total number of seconds.
/// Ordering derives compare `sec`, so sorting Runners sorts by time.
/// NOTE(review): u16 caps at 65_535 s (18:12:15); larger h/m/s inputs
/// overflow in debug builds — confirm the kata guarantees smaller times.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Runner {
    sec: u16,
}
impl Runner {
    /// Builds a Runner from an (hours, minutes, seconds) triple.
    fn from(v: (u8, u8, u8)) -> Runner {
        let (h, m, s) = v;
        Runner {
            sec: u16::from(h) * 3600 + u16::from(m) * 60 + u16::from(s),
        }
    }
}
/// Parses "h|m|s, h|m|s, …" into Runners sorted ascending by total seconds.
fn parse_runner(strg: &str) -> Vec<Runner> {
    // Flatten every entry into a single stream of numeric fields, then
    // regroup them three at a time (h, m, s) via itertools' `tuples`.
    let fields = strg.split(", ").flat_map(|entry| entry.split('|'));
    let numbers = fields.map(|field| field.parse::<u8>().unwrap());
    numbers
        .tuples::<(_, _, _)>()
        .map(Runner::from)
        .sorted()
        .collect::<Vec<Runner>>()
}
/// Formats a duration given in seconds as zero-padded "hh|mm|ss".
fn parse_to_format(x: u16) -> String {
    let h = x / 3600;
    // Bug fix: was `(x - 3600) / 60`, which underflows ("attempt to
    // subtract with overflow") whenever x < 3600 — a u16 cannot go
    // negative. The minutes are the remainder after removing whole
    // hours: x % 3600, then divided by 60.
    let m = (x % 3600) / 60;
    let s = x % 60;
    format!("{:02}|{:02}|{:02}", h, m, s)
}
/// Builds the "Range: … Average: … Median: …" summary string.
/// Precondition: `runners` is non-empty and sorted ascending (as produced
/// by `parse_runner`).
fn return_stats(runners: &[Runner]) -> String {
    // Slice is sorted, so last - first is the spread and cannot underflow.
    let range: u16 = runners.last().unwrap().sec - runners.first().unwrap().sec;
    // Sum in u32: two u16 values can already exceed u16::MAX, so summing
    // in u16 would overflow (panic in debug) for realistic inputs.
    let total: u32 = runners.iter().map(|r| u32::from(r.sec)).sum();
    let average: u16 = (total / runners.len() as u32) as u16;
    let mid = runners.len() / 2;
    let median: u16 = if runners.len() % 2 != 0 {
        runners[mid].sec
    } else {
        // Even count: the median is the mean of the two MIDDLE elements,
        // indices mid-1 and mid (the original used mid and mid+1 — off by
        // one, and mid+1 reads past the end for len == 2 semantics).
        // Summing in u32 before dividing also avoids the up-to-1-second
        // loss of the original `a/2 + b/2` truncation.
        ((u32::from(runners[mid - 1].sec) + u32::from(runners[mid].sec)) / 2) as u16
    };
    format!(
        "Range: {} Average: {} Median: {}",
        parse_to_format(range),
        parse_to_format(average),
        parse_to_format(median)
    )
}
/// Entry point: parses the raw results string and returns the
/// "Range/Average/Median" summary, all formatted as "hh|mm|ss".
fn stati(strg: &str) -> String {
    return_stats(&parse_runner(strg))
}
I can't find the mistake I made with the subtraction that is supposedly making my code fail the test. Basically I'm trying to start with a &str like "01|15|59, 1|47|6, 01|17|20, 1|32|34, 2|3|17" and end up with another one like "Range: 00|47|18 Average: 01|35|15 Median: 01|32|34"
Sorry in advance for my mistake if it is really stupid, I've been trying to fix it for quite a while
https://www.codewars.com/kata/55b3425df71c1201a800009c/train/rust

let m = (x - 3600) / 60;
As Peter mentioned, that will indeed overflow if x is less than 3600. A u16 can not be negative.
Using integer arithmetic, here's another way of formatting seconds to hh|mm|ss and does not experience overflow:
/// Formats a second count as zero-padded "hh|mm|ss" using pure modulo
/// arithmetic — no subtraction, so no way to underflow.
fn seconds_to_hhmmss(s: u64) -> String {
    let h = s / 3600;
    let m = (s % 3600) / 60;
    format!("{:02}|{:02}|{:02}", h, m, s % 60)
}

Related

How to format to other number bases besides decimal, hexadecimal? [duplicate]

Currently I'm using the following code to return a number as a binary (base 2), octal (base 8), or hexadecimal (base 16) string.
/// Parses `numb` as an integer in base `inp`, then re-formats it in base
/// `out` (2, 8, 10 or 16 only). Returns a descriptive Err otherwise.
///
/// Takes `&str` instead of `&String`: existing callers holding a `&String`
/// still work unchanged via deref coercion, and `&str` callers no longer
/// need an owned String.
fn convert(inp: u32, out: u32, numb: &str) -> Result<String, String> {
    match isize::from_str_radix(numb, inp) {
        Ok(a) => match out {
            2 => Ok(format!("{:b}", a)),
            8 => Ok(format!("{:o}", a)),
            16 => Ok(format!("{:x}", a)),
            10 => Ok(format!("{}", a)),
            // `.to_string()` — `format!` with no placeholders is redundant.
            0 | 1 => Err("No base lower than 2!".to_string()),
            _ => Err("printing in this base is not supported".to_string()),
        },
        Err(e) => Err(format!(
            "Could not convert {} to a number in base {}.\n{:?}\n",
            numb, inp, e
        )),
    }
}
Now I want to replace the inner match statement so I can return the number as an arbitrarily based string (e.g. base 3.) Are there any built-in functions to convert a number into any given radix, similar to JavaScript's Number.toString() method?
For now, you cannot do it using the standard library, but you can:
use my crate radix_fmt
or roll your own implementation:
/// Formats `x` in the given radix (2..=36) as lowercase digits.
///
/// # Panics
/// `std::char::from_digit` panics for a radix outside 2..=36.
fn format_radix(mut x: u32, radix: u32) -> String {
    // Digits come out least-significant first; collect then reverse.
    // Pre-sizing avoids reallocation: 32 chars covers u32 even in base 2.
    let mut digits = Vec::with_capacity(32);
    loop {
        digits.push(std::char::from_digit(x % radix, radix).unwrap());
        x /= radix;
        if x == 0 {
            break;
        }
    }
    digits.iter().rev().collect()
}
fn main() {
assert_eq!(format_radix(1234, 10), "1234");
assert_eq!(format_radix(1000, 10), "1000");
assert_eq!(format_radix(0, 10), "0");
}
If you wanted to eke out a little more performance, you can create a struct and implement Display or Debug for it. This avoids allocating a String. For maximum over-engineering, you can also have a stack-allocated array instead of the Vec.
Here is Boiethios' answer with these changes applied:
/// Pairs a value with a radix so it can be formatted lazily via `Display`,
/// avoiding the intermediate String allocation of `format_radix`.
struct Radix {
    x: i32,
    radix: u32,
}
impl Radix {
    /// Validates the radix up front; `char::from_digit` only supports
    /// bases 2..=36, so anything else is rejected here instead of
    /// panicking later during formatting.
    fn new(x: i32, radix: u32) -> Result<Self, &'static str> {
        if (2..=36).contains(&radix) {
            Ok(Self { x, radix })
        } else {
            Err("Unsupported radix") // fixed typo: was "Unnsupported"
        }
    }
}
use std::fmt;
impl fmt::Display for Radix {
    /// Writes the digits most-significant first, with a leading '-' for
    /// negative values, using a stack buffer instead of a heap Vec.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // 128 slots is enough even for binary formatting of a u128.
        let mut result = ['\0'; 128];
        let mut used = 0;
        let negative = self.x < 0;
        // unsigned_abs: the original `x *= -1` overflows (panics in debug
        // builds) for i32::MIN, whose magnitude does not fit in i32.
        let mut x = self.x.unsigned_abs();
        loop {
            let m = x % self.radix;
            x /= self.radix;
            // new() guarantees 2 <= radix <= 36, so from_digit cannot panic.
            result[used] = std::char::from_digit(m, self.radix).unwrap();
            used += 1;
            if x == 0 {
                break;
            }
        }
        if negative {
            write!(f, "-")?;
        }
        for c in result[..used].iter().rev() {
            write!(f, "{}", c)?;
        }
        Ok(())
    }
}
fn main() {
    // Radix::new returns a Result, which has no Display impl, so the
    // original `.to_string()` calls did not compile — unwrap the known-good
    // constructions first.
    assert_eq!(Radix::new(1234, 10).unwrap().to_string(), "1234");
    assert_eq!(Radix::new(1000, 10).unwrap().to_string(), "1000");
    assert_eq!(Radix::new(0, 10).unwrap().to_string(), "0");
}
This could still be optimized by:
creating an ASCII array instead of a char array
not zero-initializing the array
Since these avenues require unsafe or an external crate like arraybuf, I have not included them. You can see sample code in internal implementation details of the standard library.
Here is an extended solution based on the first comment which does not bind the parameter x to be a u32:
/// Like the u32 version, but accepts any value up to u128::MAX.
///
/// # Panics
/// `std::char::from_digit` panics for a radix outside 2..=36.
fn format_radix(mut x: u128, radix: u32) -> String {
    // Widen the radix once instead of casting on every iteration.
    let base = radix as u128;
    let mut digits: Vec<char> = Vec::new();
    loop {
        let m = (x % base) as u32;
        x /= base;
        digits.push(std::char::from_digit(m, radix).unwrap());
        if x == 0 {
            break;
        }
    }
    digits.into_iter().rev().collect()
}
This is faster than the other answer:
use std::char::from_digit;
/// Encodes `n` in base `r` by inserting each new digit at the front of the
/// string (most-significant-first result without a final reverse).
/// Returns None if `from_digit` cannot produce a digit.
fn encode(mut n: u32, r: u32) -> Option<String> {
    let mut s = String::new();
    loop {
        // `?` propagates the None exactly like the original early return.
        let c = from_digit(n % r, r)?;
        s.insert(0, c);
        n /= r;
        if n == 0 {
            break;
        }
    }
    Some(s)
}
Note I also tried these, but they were slower:
https://doc.rust-lang.org/std/collections/struct.VecDeque.html#method.push_front
https://doc.rust-lang.org/std/string/struct.String.html#method.push
https://doc.rust-lang.org/std/vec/struct.Vec.html#method.insert

How to safely convert float to int in Rust

How can I safely convert a floating point type (e.g. f64) to an integer type (e.g. u64)? In other words, I want to convert the number, but only if it can actually be represented by the target type.
I found a couple of questions that don't cover this and are not duplicates:
Convert float to integer in Rust
How do I convert between numeric types safely and idiomatically?
The solution is not to use as - that performs a saturating cast. Also u64::try_from(f64) is not implemented.
The closest seems to be f64::to_int_unchecked() but unfortunately it's unsafe. You can easily check the first two safety requirements (not NaN or infinity), but the third is a bit tedious: Be representable in the return type Int, after truncating off its fractional part.
The best I can come up with is to use as to convert it back to f64 and check equality, i.e.
/// Converts `x` to u64 exactly: Some only when `x` is a non-negative
/// integer representable in u64, None otherwise (fractional, negative,
/// NaN, infinite, or too large).
fn convert(x: f64) -> Option<u64> {
    // `as` is a saturating cast, so x == 2^64 becomes u64::MAX — and
    // u64::MAX as f64 rounds back up to exactly 2^64, making the
    // round-trip equality alone wrongly accept 2^64. Guard the upper
    // bound explicitly; `u64::MAX as f64` IS 2^64, hence the strict `<`.
    let y = x as u64;
    if y as f64 == x && x < u64::MAX as f64 {
        Some(y)
    } else {
        None
    }
}
Is that the best option? Is it implemented anywhere?
For fun, I made an implementation based on the raw f64 bits:
const F64_BITS: u64 = 64;
const F64_EXPONENT_BITS: u64 = 11;
const F64_EXPONENT_MAX: u64 = (1 << F64_EXPONENT_BITS) - 1;
const F64_EXPONENT_BIAS: u64 = 1023;
const F64_FRACTION_BITS: u64 = 52;

/// Exact f64 → u64 conversion via the raw IEEE-754 bit pattern.
/// Returns None for negatives (except -0.0), NaN, infinities, subnormals,
/// fractional values, and anything >= 2^64.
pub fn f64_to_u64(f: f64) -> Option<u64> {
    let bits = f.to_bits();
    let sign = bits & (1 << (F64_EXPONENT_BITS + F64_FRACTION_BITS)) != 0;
    let exponent = (bits >> F64_FRACTION_BITS) & ((1 << F64_EXPONENT_BITS) - 1);
    let fraction = bits & ((1 << F64_FRACTION_BITS) - 1);
    // Debug trace retained from the original; remove for production use.
    eprintln!("Input: {f}, bits: {bits:b}, sign: {sign}, exponent: {exponent}, fraction: {fraction}");
    match (sign, exponent, fraction) {
        // +0.0 and -0.0 both map to 0.
        (_, 0, 0) => {
            debug_assert!(f == 0.0);
            Some(0)
        },
        // Any other negative value cannot fit an unsigned type.
        (true, _, _) => {
            debug_assert!(f < 0.0);
            None
        },
        (_, F64_EXPONENT_MAX, 0) => {
            debug_assert!(f.is_infinite());
            None
        },
        (_, F64_EXPONENT_MAX, _) => {
            debug_assert!(f.is_nan());
            None
        },
        // Subnormals are all strictly between 0 and 1: never integers.
        (_, 0, _) => {
            debug_assert!(f.is_subnormal());
            None
        },
        _ => {
            if exponent < F64_EXPONENT_BIAS {
                // Magnitude in (0, 1): cannot be a nonzero integer.
                debug_assert!(f < 1.0);
                None
            } else {
                // Restore the implicit leading 1 of normal numbers.
                let mantissa = fraction | (1 << F64_FRACTION_BITS);
                let left_shift = exponent as i64 - (F64_EXPONENT_BIAS + F64_FRACTION_BITS) as i64;
                if left_shift < 0 {
                    let right_shift = (-left_shift) as u64;
                    // The value is an integer iff ALL bits below the binary
                    // point — the low `right_shift` bits — are zero.
                    // Bug fix: the original `1 << right_shift - 1` parses as
                    // `1 << (right_shift - 1)` (binary `-` binds tighter
                    // than `<<`) and tested only the single highest
                    // fractional bit, wrongly accepting values like 2.25.
                    if mantissa & ((1 << right_shift) - 1) != 0 {
                        debug_assert!(f.fract() != 0.0);
                        None
                    } else {
                        Some(mantissa >> right_shift)
                    }
                } else {
                    // A 53-bit mantissa shifted left by more than 11 would
                    // push significant bits past bit 63: value >= 2^64.
                    if left_shift > (F64_BITS - F64_FRACTION_BITS - 1) as i64 {
                        debug_assert!(f > 2.0f64.powi(63));
                        None
                    } else {
                        Some(mantissa << left_shift)
                    }
                }
            }
        },
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Both signed zeros are the integer 0.
    #[test]
    fn zero() {
        assert_eq!(f64_to_u64(0.0), Some(0));
        assert_eq!(f64_to_u64(-0.0), Some(0));
    }
    // Exact integers across both shift branches (right-shift for small
    // values, left-shift for values above 2^52).
    #[test]
    fn positive() {
        assert_eq!(f64_to_u64(1.0), Some(1));
        assert_eq!(f64_to_u64(2.0), Some(2));
        assert_eq!(f64_to_u64(3.0), Some(3));
        assert_eq!(f64_to_u64(2.0f64.powi(52)), Some(1 << 52));
        assert_eq!(f64_to_u64(2.0f64.powi(53)), Some(1 << 53));
        assert_eq!(f64_to_u64(2.0f64.powi(63)), Some(1 << 63));
        // NOTE(review): 11 and 111 here are DECIMAL literals; their high
        // bits fall off the top of the u64 shift, leaving 0b11 << 62 and
        // 0b111 << 61 — exactly 1.5*2^63 and 1.75*2^63. Left-shift does not
        // trap on discarded value bits, only on shift amounts >= the bit
        // width, so this compiles and passes, but 3 << 62 / 7 << 61 would
        // be clearer.
        assert_eq!(f64_to_u64(1.5 * 2.0f64.powi(63)), Some(11 << 62));
        assert_eq!(f64_to_u64(1.75 * 2.0f64.powi(63)), Some(111 << 61));
    }
    // 2^64 is one past u64::MAX.
    #[test]
    fn too_big() {
        assert_eq!(f64_to_u64(2.0f64.powi(64)), None);
    }
    // Values with nonzero fractional parts are rejected.
    #[test]
    fn fractional() {
        assert_eq!(f64_to_u64(0.5), None);
        assert_eq!(f64_to_u64(1.5), None);
        assert_eq!(f64_to_u64(2.5), None);
    }
    // Any negative value other than -0.0 is rejected.
    #[test]
    fn negative() {
        assert_eq!(f64_to_u64(-1.0), None);
        assert_eq!(f64_to_u64(-2.0), None);
        assert_eq!(f64_to_u64(-3.0), None);
        assert_eq!(f64_to_u64(-(2.0f64.powi(f64::MANTISSA_DIGITS as i32))), None);
    }
    #[test]
    fn infinity() {
        assert_eq!(f64_to_u64(f64::INFINITY), None);
        assert_eq!(f64_to_u64(-f64::INFINITY), None);
    }
    #[test]
    fn nan() {
        assert_eq!(f64_to_u64(f64::NAN), None);
    }
}
Not sure whether this is useful. It's, ahem, slightly more complex than the solutions proposed so far. It may be faster on some hardware, but I doubt it, and haven't bothered to write a benchmark.
Generally, I would defer what is best practice to the relevant lints used by Clippy. Clippy does a good job outlining the possible pitfalls of using x as y and offers possible solutions. These are all of the relevant lints I could find on the subject:
https://rust-lang.github.io/rust-clippy/master/#cast_possible_truncation
https://rust-lang.github.io/rust-clippy/master/#cast_lossless
https://rust-lang.github.io/rust-clippy/master/#cast_possible_wrap
https://rust-lang.github.io/rust-clippy/master/#cast_precision_loss
https://rust-lang.github.io/rust-clippy/master/#cast_sign_loss
However if all you want is to find an answer of mapping f64 onto u64 without any precision loss, there are two conditions you will want to check:
x is an integer
x is within the bounds of the target type
/// Maps `x` onto u64 only when it is a non-negative integer that fits.
pub fn strict_f64_to_u64(x: f64) -> Option<u64> {
    // fract() == 0.0 rejects NaN, infinities and fractional values
    // (NaN.fract() and INFINITY.fract() are NaN, which compares unequal).
    // The upper bound must be STRICT: `u64::MAX as f64` rounds up to 2^64,
    // one past u64::MAX, so the original `<=` accepted 2^64 and the
    // saturating `as` cast silently returned u64::MAX for it.
    // Bug fix: `Some(x.trunc())` did not compile — trunc() yields f64,
    // not u64; the cast is required.
    if x.fract() == 0.0 && x >= u64::MIN as f64 && x < u64::MAX as f64 {
        return Some(x as u64);
    }
    None
}

What is the difference between direct multiplication and recursion in Rust?

I am new to rust and I am trying to solve this leetcode question, I wrote the following code snippet
fn main() {
    // Exercises the large-exponent path (n = i32::MAX).
    let result = Solution::my_pow(0.00001, 2147483647);
    println!("{}", result);
}
struct Solution {}
impl Solution {
    /// Computes x^n by repeated squaring.
    ///
    /// Bug fix: the doubling counter and exponent are widened to i64.
    /// With i32, `i * 2` overflows (the thread's "attempt to multiply
    /// with overflow" panic) once `i` must exceed n = i32::MAX, and
    /// `exp = -n` would overflow for n = i32::MIN, whose magnitude has
    /// no i32 representation.
    pub fn my_pow(x: f64, n: i32) -> f64 {
        let mut base = x;
        let mut exp = n as i64;
        let mut res = 1.0;
        if n < 0 {
            base = 1.0 / base;
            // Negate in i64 so i32::MIN is handled correctly.
            exp = -(n as i64);
        }
        let mut i: i64 = 1;
        while exp != 0 {
            // Square `temp` while doubling `i`, stopping at the largest
            // power of two not exceeding the remaining exponent.
            let mut temp = base;
            while i * 2 <= exp {
                temp *= temp;
                i *= 2;
            }
            res *= temp;
            exp -= i;
            i = 1;
        }
        res
    }
}
when I run the code, it panicked and print an error message say thread 'main' panicked at 'attempt to multiply with overflow', src/main.rs:19:19, but the following code snippet
impl Solution {
    /// Binary (square-and-multiply) exponentiation: x^n in O(log n) steps.
    pub fn my_pow(x: f64, n: i32) -> f64 {
        // Iterative square-and-multiply. Performs the same sequence of
        // floating-point multiplications as the tail-recursive version:
        // on an odd bit, res is multiplied by the CURRENT x before x is
        // squared, so the results are bit-identical.
        fn pow(mut x: f64, mut res: f64, mut n: i64) -> f64 {
            while n != 0 {
                if n & 1 == 1 {
                    res *= x;
                }
                x *= x;
                n >>= 1;
            }
            res
        }
        if n == 0 {
            1.0
        } else if n < 0 {
            // Widen to i64 before abs(): i32::MIN has no i32 negation.
            pow(1.0 / x, 1.0, (n as i64).abs())
        } else {
            pow(x, 1.0, n as i64)
        }
    }
}
could run just as expected. I am confused. What is the difference between the two code snippets?
What is the difference between the two code snippets?
That one of them overflows and the other not. Did you consider reading the error message and wondering how that could be relevant to your code? The panic is pointing to:
while i * 2 <= exp {
You've defined i without specifying its type, meaning it's an i32 (32b signed integer, two's complement). You keep multiplying it by 2 so aside from its initial value it can only be even, and you're doing so until it's larger than exp which is initially n and thus 2147483647.
The smallest even number larger than 2147483647 is 2147483648, which is not a valid i32, so the multiplication overflows — which is exactly what the panic tells you.

Format/convert a number to a string in any base (including bases other than decimal or hexadecimal)

Currently I'm using the following code to return a number as a binary (base 2), octal (base 8), or hexadecimal (base 16) string.
/// Parses `numb` as an integer in base `inp`, then re-formats it in base
/// `out` (2, 8, 10 or 16 only). Returns a descriptive Err otherwise.
///
/// Takes `&str` instead of `&String`: callers holding a `&String` still
/// work unchanged via deref coercion.
fn convert(inp: u32, out: u32, numb: &str) -> Result<String, String> {
    match isize::from_str_radix(numb, inp) {
        Ok(a) => match out {
            2 => Ok(format!("{:b}", a)),
            8 => Ok(format!("{:o}", a)),
            16 => Ok(format!("{:x}", a)),
            10 => Ok(format!("{}", a)),
            // `.to_string()` — `format!` with no placeholders is redundant.
            0 | 1 => Err("No base lower than 2!".to_string()),
            _ => Err("printing in this base is not supported".to_string()),
        },
        Err(e) => Err(format!(
            "Could not convert {} to a number in base {}.\n{:?}\n",
            numb, inp, e
        )),
    }
}
Now I want to replace the inner match statement so I can return the number as an arbitrarily based string (e.g. base 3.) Are there any built-in functions to convert a number into any given radix, similar to JavaScript's Number.toString() method?
For now, you cannot do it using the standard library, but you can:
use my crate radix_fmt
or roll your own implementation:
/// Formats `x` in the given radix (2..=36) as lowercase digits.
///
/// # Panics
/// `std::char::from_digit` panics for a radix outside 2..=36.
fn format_radix(mut x: u32, radix: u32) -> String {
    // Peel off digits least-significant first, then reverse. The first
    // digit is taken unconditionally so that x == 0 still yields "0".
    let mut reversed = vec![std::char::from_digit(x % radix, radix).unwrap()];
    x /= radix;
    while x != 0 {
        reversed.push(std::char::from_digit(x % radix, radix).unwrap());
        x /= radix;
    }
    reversed.into_iter().rev().collect()
}
fn main() {
assert_eq!(format_radix(1234, 10), "1234");
assert_eq!(format_radix(1000, 10), "1000");
assert_eq!(format_radix(0, 10), "0");
}
If you wanted to eke out a little more performance, you can create a struct and implement Display or Debug for it. This avoids allocating a String. For maximum over-engineering, you can also have a stack-allocated array instead of the Vec.
Here is Boiethios' answer with these changes applied:
/// Pairs a value with a radix for lazy formatting through `Display`,
/// avoiding the intermediate String of `format_radix`.
struct Radix {
    x: i32,
    radix: u32,
}
impl Radix {
    /// Rejects radices outside 2..=36 up front, the range supported by
    /// `char::from_digit`, so formatting can never panic on the radix.
    fn new(x: i32, radix: u32) -> Result<Self, &'static str> {
        if (2..=36).contains(&radix) {
            Ok(Self { x, radix })
        } else {
            Err("Unsupported radix") // fixed typo: was "Unnsupported"
        }
    }
}
use std::fmt;
impl fmt::Display for Radix {
    /// Writes digits most-significant first, with a leading '-' for
    /// negative values, using a fixed stack buffer (no heap allocation).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // 128 slots covers even binary formatting of a u128.
        let mut result = ['\0'; 128];
        let mut used = 0;
        let negative = self.x < 0;
        // unsigned_abs: the original `x *= -1` overflows (panics in debug
        // builds) for i32::MIN, whose magnitude does not fit in i32.
        let mut x = self.x.unsigned_abs();
        loop {
            let m = x % self.radix;
            x /= self.radix;
            // new() guarantees 2 <= radix <= 36, so from_digit cannot panic.
            result[used] = std::char::from_digit(m, self.radix).unwrap();
            used += 1;
            if x == 0 {
                break;
            }
        }
        if negative {
            write!(f, "-")?;
        }
        for c in result[..used].iter().rev() {
            write!(f, "{}", c)?;
        }
        Ok(())
    }
}
fn main() {
    // Radix::new returns a Result, which has no Display impl, so calling
    // .to_string() on it directly did not compile — unwrap first.
    assert_eq!(Radix::new(1234, 10).unwrap().to_string(), "1234");
    assert_eq!(Radix::new(1000, 10).unwrap().to_string(), "1000");
    assert_eq!(Radix::new(0, 10).unwrap().to_string(), "0");
}
This could still be optimized by:
creating an ASCII array instead of a char array
not zero-initializing the array
Since these avenues require unsafe or an external crate like arraybuf, I have not included them. You can see sample code in internal implementation details of the standard library.
Here is an extended solution based on the first comment which does not bind the parameter x to be a u32:
/// u128 variant: formats `x` in the given radix (2..=36).
///
/// # Panics
/// `std::char::from_digit` panics for a radix outside 2..=36.
fn format_radix(mut x: u128, radix: u32) -> String {
    // Widen the radix once; digits come out least-significant first.
    let base = u128::from(radix);
    let mut out = String::new();
    loop {
        out.push(std::char::from_digit((x % base) as u32, radix).unwrap());
        x /= base;
        if x == 0 {
            break;
        }
    }
    out.chars().rev().collect()
}
This is faster than the other answer:
use std::char::from_digit;
/// Encodes `n` in base `r`, building the string most-significant-first by
/// front-inserting each digit. Returns None if a digit cannot be formed.
fn encode(mut n: u32, r: u32) -> Option<String> {
    let mut s = String::new();
    loop {
        // match mirrors the original if-let; None short-circuits the call.
        match from_digit(n % r, r) {
            Some(c) => s.insert(0, c),
            None => return None,
        }
        n /= r;
        if n == 0 {
            break;
        }
    }
    Some(s)
}
Note I also tried these, but they were slower:
https://doc.rust-lang.org/std/collections/struct.VecDeque.html#method.push_front
https://doc.rust-lang.org/std/string/struct.String.html#method.push
https://doc.rust-lang.org/std/vec/struct.Vec.html#method.insert

RGB to YCbCr using SIMD vectors lose some data

I'm writing JPEG decoder/encoder in Rust and I have some problem with RGB ↔ YCbCr conversion.
My code:
use std::simd::f32x4;
/// Restricts `val` to the closed interval [min, max].
/// Only needs PartialOrd, so it works for floats as well as integers.
fn clamp<T>(val: T, min: T, max: T) -> T
where T: PartialOrd {
    match val {
        v if v < min => min,
        v if max < v => max,
        v => v,
    }
}
// in the original code there are 2 methods: one for processors with SSE3 and one for the rest
// both do the same and give the same results
// Horizontal sum of a 4-lane f32 vector.
// NOTE(review): this uses the pre-1.0 `std::simd` tuple-struct API
// (`f32x4(a, b, c, d)` destructured directly in the parameter list),
// which no longer exists in stable Rust.
pub fn sum_f32x4(f32x4(a, b, c, d): f32x4) -> f32 {
    a + b + c + d
}
// Converts an RGB pixel to YCbCr (JPEG-style constants).
// The 4th lane carries 1.0 so each channel is a single dot product
// [r g b 1] · [c0 c1 c2 offset], with the 128 chroma offset folded in.
// NOTE(review): the plain `as u8` casts truncate without rounding or
// clamping — that truncation is what loses data on round trips; compare
// the later version that rounds and clamps.
pub fn rgb_to_ycbcr(r: u8, g: u8, b: u8) -> (u8, u8, u8) {
    let rgb = f32x4(r as f32, g as f32, b as f32, 1.0);
    let y = sum_f32x4(rgb * f32x4( 0.2990, 0.5870, 0.1140, 0.0));
    let cb = sum_f32x4(rgb * f32x4(-0.1687, -0.3313, 0.5000, 128.0));
    let cr = sum_f32x4(rgb * f32x4( 0.5000, -0.4187, -0.0813, 128.0));
    (y as u8, cb as u8, cr as u8)
}
// Converts YCbCr back to RGB. The 128 chroma offset is removed before the
// dot products. Results are clamped to [0, 255] before the u8 cast, but
// still truncated rather than rounded.
pub fn ycbcr_to_rgb(y: u8, cb: u8, cr: u8) -> (u8, u8, u8) {
    let ycbcr = f32x4(y as f32, cb as f32 - 128.0f32, cr as f32 - 128.0f32, 0.0);
    let r = sum_f32x4(ycbcr * f32x4(1.0, 0.00000, 1.40200, 0.0));
    let g = sum_f32x4(ycbcr * f32x4(1.0, -0.34414, -0.71414, 0.0));
    let b = sum_f32x4(ycbcr * f32x4(1.0, 1.77200, 0.00000, 0.0));
    (clamp(r, 0., 255.) as u8, clamp(g, 0., 255.) as u8, clamp(b, 0., 255.) as u8)
}
fn main() {
    // Round-trip checks: note that blue 171 converts to the same YCbCr
    // triple that decodes back to 169 — the truncating casts make the
    // conversion non-invertible.
    assert_eq!(rgb_to_ycbcr( 0, 71, 171), ( 61, 189, 84));
    // assert_eq!(rgb_to_ycbcr( 0, 71, 169), ( 61, 189, 84)); // will fail
    // for some reason we always lose data on blue channel
    assert_eq!(ycbcr_to_rgb( 61, 189, 84), ( 0, 71, 169));
}
For some reason both tests (in comments) pass. I would rather expect at least one of them to fail. Am I wrong? At least it should stop at some point, but when I change jpeg::color::utils::rgb_to_ycbcr(0, 71, 171) to jpeg::color::utils::rgb_to_ycbcr(0, 71, 169), the test fails because the YCbCr value has changed — so I will lose my blue channel forever.
#dbaupp put the nail in the coffin with the suggestion to use round:
#![allow(unstable)]
use std::simd::{f32x4};
use std::num::Float;
/// Clamps to the u8 range and ROUNDS (instead of truncating) before the
/// cast — the rounding is what makes the RGB↔YCbCr round trip stable.
fn clamp(val: f32) -> u8 {
    match val {
        v if v < 0.0 => 0,
        v if v > 255.0 => 255,
        v => v.round() as u8,
    }
}
// Horizontal sum of a 4-lane f32 vector via tuple-field access
// (pre-1.0 `std::simd` API; not available in stable Rust today).
fn sum_f32x4(v: f32x4) -> f32 {
    v.0 + v.1 + v.2 + v.3
}
// RGB → YCbCr using the higher-precision JFIF coefficients; clamp() now
// rounds instead of truncating, which (per the output below) makes the
// round trip settle on a fixed point after one iteration.
pub fn rgb_to_ycbcr((r, g, b): (u8, u8, u8)) -> (u8, u8, u8) {
    let rgb = f32x4(r as f32, g as f32, b as f32, 1.0);
    let y = sum_f32x4(rgb * f32x4( 0.299000, 0.587000, 0.114000, 0.0));
    let cb = sum_f32x4(rgb * f32x4(-0.168736, -0.331264, 0.500000, 128.0));
    let cr = sum_f32x4(rgb * f32x4( 0.500000, -0.418688, -0.081312, 128.0));
    (clamp(y), clamp(cb), clamp(cr))
}
// YCbCr → RGB: removes the 128 chroma offset, applies the inverse
// transform, then rounds and clamps each channel into u8.
pub fn ycbcr_to_rgb((y, cb, cr): (u8, u8, u8)) -> (u8, u8, u8) {
    let ycbcr = f32x4(y as f32, cb as f32 - 128.0f32, cr as f32 - 128.0f32, 0.0);
    let r = sum_f32x4(ycbcr * f32x4(1.0, 0.00000, 1.40200, 0.0));
    let g = sum_f32x4(ycbcr * f32x4(1.0, -0.34414, -0.71414, 0.0));
    let b = sum_f32x4(ycbcr * f32x4(1.0, 1.77200, 0.00000, 0.0));
    (clamp(r), clamp(g), clamp(b))
}
fn main() {
    // Iterate the RGB → YCbCr → RGB round trip to show it reaches a fixed
    // point (the printed output stabilises at (1, 72, 16)) rather than
    // drifting further on every pass.
    let mut rgb = (0, 71, 16);
    println!("{:?}", rgb);
    for _ in 0..100 {
        let yuv = rgb_to_ycbcr(rgb);
        rgb = ycbcr_to_rgb(yuv);
        println!("{:?}", rgb);
    }
}
Note that I also increased the precision of your values in rgb_to_ycbcr from the Wikipedia page. I also clamp in both functions, as well as calling round. Now the output is:
(0u8, 71u8, 16u8)
(1u8, 72u8, 16u8)
(1u8, 72u8, 16u8)
With the last value repeating for the entire loop.

Resources