Convert a hex string into binary data in a buffer - node.js

Sometimes the data you receive from network transmissions or USB devices arrives as a hexadecimal string, e.g.:
"12ADFF1345"
I want to convert this kind of string into its binary equivalent in a buffer, in order to perform mathematical or binary operations on it.
Do you know how I can achieve that?

Use the built-in Buffer class:
let buf1 = Buffer.from('12ADFF1345', 'hex');
let value = buf1.readInt32LE(0);
let value2 = buf1.readInt16LE(2);
console.log(value,value2);
>> 335523090 5119
// i.e. 0x13FFAD12 and 0x13FF from the little-endian reads above
// The big-endian variants readInt32BE(0) and readInt16BE(2) would print:
>> 313392915 -237
// i.e. 0x12ADFF13 and 0xFF13 (negative when read as a signed 16-bit int)
https://nodejs.org/api/buffer.html#buffer_class_method_buffer_from_string_encoding
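For completeness, a short round-trip sketch using only the standard Buffer API: once the data is in a buffer you can operate on the bytes directly and serialize back to hex with toString('hex').
let buf = Buffer.from('12ADFF1345', 'hex');
buf[0] &= 0x0f;                    // byte-wise binary operation
console.log(buf.toString('hex'));  // '02adff1345'
console.log(buf.readUInt16BE(1));  // 44543 (0xADFF)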

Yes, I know how to do that; the algorithm is simple (assuming there are no escape characters):
Split the received string into individual characters.
Group the characters into pairs.
For each pair, build the string 0x<pair>.
parseInt that string with base 16.
In other words, see the following code:
// _.each comes from lodash; any array iteration would work here.
const _ = require('lodash');
const hexStringToBinaryBuffer = (string) => {
  const subStrings = Array.from(string);
  let previous = null;
  const bytes = [];
  _.each(subStrings, (val) => { // converting every 2 chars into one byte
    if (previous === null) { // first character of the pair
      previous = val;
    } else { // second character: parse the pair as a byte value
      const value = parseInt(`0x${previous}${val}`, 16);
      bytes.push(value);
      previous = null;
    }
  });
  return Buffer.from(bytes);
};
This is useful when the other end of a network socket or USB port sends you the result of Buffer.toString('hex') (or an equivalent method) and you receive it as a string.
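A quick sanity check of the function above against the built-in hex decoding (assuming lodash is installed):
const a = hexStringToBinaryBuffer('12ADFF1345');
const b = Buffer.from('12ADFF1345', 'hex');
console.log(a.equals(b)); // true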


Trouble with HEX and ASCII encoding in node js [duplicate]

I'm trying to convert a Unicode string to a hexadecimal representation in JavaScript.
This is what I have:
function convertFromHex(hex) {
  var hex = hex.toString(); // force conversion
  var str = '';
  for (var i = 0; i < hex.length; i += 2)
    str += String.fromCharCode(parseInt(hex.substr(i, 2), 16));
  return str;
}
function convertToHex(str) {
  var hex = '';
  for (var i = 0; i < str.length; i++) {
    hex += '' + str.charCodeAt(i).toString(16);
  }
  return hex;
}
But it fails on Unicode characters, like Chinese:
Input:
漢字
Output:
ªo"[W
Any ideas? Can this be done in javascript?
Remember that a JavaScript code unit is 16 bits wide. Therefore the hex string form will be 4 digits per code unit.
usage:
var str = "\u6f22\u5b57"; // "\u6f22\u5b57" === "漢字"
alert(str.hexEncode().hexDecode());
String to hex form:
String.prototype.hexEncode = function () {
  var hex, i;
  var result = "";
  for (i = 0; i < this.length; i++) {
    hex = this.charCodeAt(i).toString(16);
    result += ("000" + hex).slice(-4);
  }
  return result;
};
Back again:
String.prototype.hexDecode = function () {
  var j;
  var hexes = this.match(/.{1,4}/g) || [];
  var back = "";
  for (j = 0; j < hexes.length; j++) {
    back += String.fromCharCode(parseInt(hexes[j], 16));
  }
  return back;
};
Here is a tweak of McDowell's algorithm that doesn't pad the result:
function toHex(str) {
  var result = '';
  for (var i = 0; i < str.length; i++) {
    result += str.charCodeAt(i).toString(16);
  }
  return result;
}
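One caveat worth noting: without padding, the output can be ambiguous and cannot always be decoded back reliably, because code units of different sizes can collapse to the same hex string. For example:
toHex('\u0123');       // "123" (one code unit, 0x123)
toHex('\u0001\u0023'); // "123" (two code units, 0x01 and 0x23)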
A more up-to-date solution, for encoding:
// This is the same for all of the below, and
// you probably won't need it except for debugging
// in most cases.
function bytesToHex(bytes) {
  return Array.from(
    bytes,
    byte => byte.toString(16).padStart(2, "0")
  ).join("");
}
// You almost certainly want UTF-8, which is
// now natively supported:
function stringToUTF8Bytes(string) {
  return new TextEncoder().encode(string);
}
// But you might want UTF-16 for some reason.
// .charCodeAt(index) will return the underlying
// UTF-16 code-units (not code-points!), so you
// just need to format them in whichever endian order you want.
function stringToUTF16Bytes(string, littleEndian) {
  const bytes = new Uint8Array(string.length * 2);
  // Using DataView is the only way to get a specific
  // endianness.
  const view = new DataView(bytes.buffer);
  for (let i = 0; i != string.length; i++) {
    view.setUint16(i * 2, string.charCodeAt(i), littleEndian);
  }
  return bytes;
}
// And you might want UTF-32 in even weirder cases.
// Fortunately, iterating a string gives the code
// points, which are identical to the UTF-32 encoding,
// though you still have the endianness issue.
function stringToUTF32Bytes(string, littleEndian) {
  const codepoints = Array.from(string, c => c.codePointAt(0));
  const bytes = new Uint8Array(codepoints.length * 4);
  // Using DataView is the only way to get a specific
  // endianness.
  const view = new DataView(bytes.buffer);
  for (let i = 0; i != codepoints.length; i++) {
    view.setUint32(i * 4, codepoints[i], littleEndian);
  }
  return bytes;
}
Examples:
bytesToHex(stringToUTF8Bytes("hello 漢字 👍"))
// "68656c6c6f20e6bca2e5ad9720f09f918d"
bytesToHex(stringToUTF16Bytes("hello 漢字 👍", false))
// "00680065006c006c006f00206f225b570020d83ddc4d"
bytesToHex(stringToUTF16Bytes("hello 漢字 👍", true))
// "680065006c006c006f002000226f575b20003dd84ddc"
bytesToHex(stringToUTF32Bytes("hello 漢字 👍", false))
// "00000068000000650000006c0000006c0000006f0000002000006f2200005b57000000200001f44d"
bytesToHex(stringToUTF32Bytes("hello 漢字 👍", true))
// "68000000650000006c0000006c0000006f00000020000000226f0000575b0000200000004df40100"
For decoding, it's generally a lot simpler, you just need:
function hexToBytes(hex) {
  const bytes = new Uint8Array(hex.length / 2);
  for (let i = 0; i !== bytes.length; i++) {
    bytes[i] = parseInt(hex.substr(i * 2, 2), 16);
  }
  return bytes;
}
then use the encoding parameter of TextDecoder:
// UTF-8 is default
new TextDecoder().decode(hexToBytes("68656c6c6f20e6bca2e5ad9720f09f918d"));
// but you can also use:
new TextDecoder("UTF-16LE").decode(hexToBytes("680065006c006c006f002000226f575b20003dd84ddc"))
new TextDecoder("UTF-16BE").decode(hexToBytes("00680065006c006c006f00206f225b570020d83ddc4d"));
// "hello 漢字 👍"
Here's the list of allowed encoding names: https://www.w3.org/TR/encoding/#names-and-labels
You might notice UTF-32 is not on that list, which is a pain, so:
function bytesToStringUTF32(bytes, littleEndian) {
  const view = new DataView(bytes.buffer);
  const codepoints = new Uint32Array(view.byteLength / 4);
  for (let i = 0; i !== codepoints.length; i++) {
    codepoints[i] = view.getUint32(i * 4, littleEndian);
  }
  return String.fromCodePoint(...codepoints);
}
Then:
bytesToStringUTF32(hexToBytes("00000068000000650000006c0000006c0000006f0000002000006f2200005b57000000200001f44d"), false)
bytesToStringUTF32(hexToBytes("68000000650000006c0000006c0000006f00000020000000226f0000575b0000200000004df40100"), true)
// "hello 漢字 👍"
It depends on what encoding you use. If you want to convert utf-8 encoded hex to string, use this:
function fromHex(hex, str) {
  try {
    str = decodeURIComponent(hex.replace(/(..)/g, '%$1'));
  } catch (e) {
    str = hex;
    console.log('invalid hex input: ' + hex);
  }
  return str;
}
For the other direction use this:
function toHex(str, hex) {
  try {
    hex = unescape(encodeURIComponent(str))
      .split('').map(function (v) {
        return v.charCodeAt(0).toString(16);
      }).join('');
  } catch (e) {
    hex = str;
    console.log('invalid text input: ' + str);
  }
  return hex;
}
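For reference, a round trip with these two functions (the hex is the UTF-8 encoding of the characters):
toHex('漢字');           // "e6bca2e5ad97"
fromHex('e6bca2e5ad97'); // "漢字"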
how do you get "\u6f22\u5b57" from 漢字 in JavaScript?
These are JavaScript Unicode escape sequences e.g. \u12AB. To convert them, you could iterate over every code unit in the string, call .toString(16) on it, and go from there.
However, it is more efficient to also use hexadecimal escape sequences e.g. \xAA in the output wherever possible.
Also note that ASCII symbols such as A, b, and - probably don’t need to be escaped.
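A bare-bones sketch of that idea, escaping every code unit as \uXXXX and skipping the optimisations just mentioned (the helper name is mine):
function toUnicodeEscapes(str) {
  let out = '';
  for (let i = 0; i < str.length; i++) {
    out += '\\u' + str.charCodeAt(i).toString(16).padStart(4, '0');
  }
  return out;
}
toUnicodeEscapes('漢字'); // "\u6f22\u5b57"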
I’ve written a small JavaScript library that does all this for you, called jsesc. It has lots of options to control the output.
Here’s an online demo of the tool in action: http://mothereff.in/js-escapes#1%E6%BC%A2%E5%AD%97
Your question was tagged as utf-8. Reading the rest of your question, UTF-8 encoding/decoding didn’t seem to be what you wanted here, but in case you ever need it: use utf8.js (online demo).
Here you go. :D
"漢字".split("").reduce((hex,c)=>hex+=c.charCodeAt(0).toString(16).padStart(4,"0"),"")
"6f225b57"
For non-Unicode (ASCII) strings:
"hi".split("").reduce((hex,c)=>hex+=c.charCodeAt(0).toString(16).padStart(2,"0"),"")
"6869"
ASCII (utf-8) binary HEX string to string
"68656c6c6f20776f726c6421".match(/.{1,2}/g).reduce((acc,char)=>acc+String.fromCharCode(parseInt(char, 16)),"")
String to ASCII (utf-8) binary HEX string
"hello world!".split("").reduce((hex,c)=>hex+=c.charCodeAt(0).toString(16).padStart(2,"0"),"")
--- unicode ---
String to UNICODE (utf-16) binary HEX string
"hello world!".split("").reduce((hex,c)=>hex+=c.charCodeAt(0).toString(16).padStart(4,"0"),"")
UNICODE (utf-16) binary HEX string to string
"00680065006c006c006f00200077006f0072006c00640021".match(/.{1,4}/g).reduce((acc,char)=>acc+String.fromCharCode(parseInt(char, 16)),"")
Here is my take: these functions convert a UTF-8 string to proper hex without the extra zero padding. A real UTF-8 string has characters that are 1, 2, 3, or 4 bytes long.
While working on this I found a couple key things that solved my problems:
str.split('') doesn't handle multi-byte characters like emojis correctly. The proper/modern way to handle this is with Array.from(str)
encodeURIComponent() and decodeURIComponent() are great tools to convert between string and hex. They are pretty standard, they handle UTF8 correctly.
(Most) ASCII characters (codes 0 - 127) don't get URI encoded, so they need to be handled separately. But c.charCodeAt(0).toString(16) works perfectly for those.
function utf8ToHex(str) {
  return Array.from(str).map(c =>
    c.charCodeAt(0) < 128 ? c.charCodeAt(0).toString(16) :
    encodeURIComponent(c).replace(/\%/g, '').toLowerCase()
  ).join('');
}
function hexToUtf8(hex) {
  return decodeURIComponent('%' + hex.match(/.{1,2}/g).join('%'));
}
Demo: https://jsfiddle.net/lyquix/k2tjbrvq/
UTF-8 Supported Conversion
Encode
function utf8ToHex(str) {
  return Array.from(str).map(c =>
    c.charCodeAt(0) < 128 ? c.charCodeAt(0).toString(16) :
    encodeURIComponent(c).replace(/\%/g, '').toLowerCase()
  ).join('');
}
Decode
function hexToUtf8(hex) {
  return decodeURIComponent('%' + hex.match(/.{1,2}/g).join('%'));
}
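A quick round trip with these, for reference (mixing 1-byte and 2-byte characters):
utf8ToHex('Amélie');           // "416dc3a96c6965"
hexToUtf8('416dc3a96c6965');   // "Amélie"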

Properly parse MAC Address

When I use tools such as snmp-walk or snmp-get to query an OID with a return type of MacAddress, it always parses the data as a Hex-STRING and displays it properly. Even when they don't have the MIBs loaded, it still works.
bash#snmpwalk -v 2c -c public 10.1.2.3 1.3.6.1.4.1.14179.2.2.1.1
SNMPv2-SMI::enterprises.14179.2.2.1.1.1.16.189.24.206.212.64 = Hex-STRING: 10 BD 18 CE D4 40
SNMPv2-SMI::enterprises.14179.2.2.1.1.1.100.233.80.151.114.192 = Hex-STRING: 64 E9 50 97 72 C0
However, I can't seem to get the same result from Lextm.SharpSnmpLib (11.2.0). Data types of MacAddress don't get decoded correctly and it's a manual process to convert it to a proper MAC.
public void WalkTable()
{
    const string baseOid = "1.3.6.1.4.1.14179.2.2.1.1"; // The entire table
    const string community = "public";
    var ep = new IPEndPoint(IPAddress.Parse("10.1.2.3"), 161);
    var results = new List<Variable>();
    Messenger.Walk(VersionCode.V2, ep, new OctetString(community), new ObjectIdentifier(baseOid), results, 60000, WalkMode.WithinSubtree);
    foreach (var v in results)
        Console.WriteLine(v.Data.ToString());
}
Am I doing something wrong or is this just how the library works?
You are outputting the MAC Address as ASCII instead of Hex. Here's a quick method I put together that will detect non-ascii characters and output as hex if any are found.
public void WalkTable()
{
    const string baseOid = "1.3.6.1.4.1.14179.2.2.1.1"; // The entire table
    const string community = "public";
    var ep = new IPEndPoint(IPAddress.Parse("10.1.2.3"), 161);
    var results = new List<Variable>();
    Messenger.Walk(VersionCode.V2, ep, new OctetString(community), new ObjectIdentifier(baseOid), results, 60000, WalkMode.WithinSubtree);
    foreach (var v in results)
        // If the result is an OctetString, check for ascii, otherwise use ToString()
        Console.WriteLine(v.Data.TypeCode.ToString() == "OctetString" ? DecodeOctetString(v.Data.ToBytes()) : v.Data.ToString());
}
public string DecodeOctetString(byte[] raw)
{
    // First 2 bytes are the Type, so remove them
    byte[] bytes = new byte[raw.Length - 2];
    Array.Copy(raw, 2, bytes, 0, bytes.Length);
    // Check if there are any non-ascii characters
    bool ascii = true;
    foreach (char c in Encoding.UTF8.GetString(bytes))
    {
        if (c >= 128)
        {
            ascii = false;
        }
    }
    // If it's all ascii, return as ascii, else convert to hex
    return ascii ? Encoding.ASCII.GetString(bytes) : BitConverter.ToString(bytes);
}

Convert Uint8Array[n] into integer in node.js

I have a variable Uint8Arr that is a Uint8Array of length 4.
Uint8Arr[0]=0x12;
Uint8Arr[1]=0x19;
Uint8Arr[2]=0x21;
Uint8Arr[3]=0x47;
I want to convert Uint8Arr into its equivalent integer which is 0x12192147 or 303636807.
I would like to have a function that can convert Uint8Arr[n] into its equivalent integer and return the result in decimal.
For those who want little-endian (or explicit control over byte order), you can specify the endianness using the DataView class. Note that getUint32(0) with false or no second argument reads big-endian, which gives the 0x12192147 the question asks for.
let buff = new Uint8Array(4);
buff[0]=0x12;
buff[1]=0x19;
buff[2]=0x21;
buff[3]=0x47;
var view = new DataView(buff.buffer, 0);
view.getUint32(0, true); // true here represents little-endian
This solution works for a Uint8Arr of any length up to 6 bytes (the maximum readUIntBE supports).
function convert(Uint8Arr) {
  var length = Uint8Arr.length;
  let buffer = Buffer.from(Uint8Arr);
  var result = buffer.readUIntBE(0, length);
  return result;
}
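For example (keeping the 6-byte limit of readUIntBE in mind):
convert(new Uint8Array([0x12, 0x19, 0x21, 0x47])); // 303636807, i.e. 0x12192147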
This is one solution:
let Uint8Arr = new Uint8Array(4);
Uint8Arr[0]=0x12;
Uint8Arr[1]=0x19;
Uint8Arr[2]=0x21;
Uint8Arr[3]=0x47;
let buffer = Buffer.from(Uint8Arr);
console.log( buffer.readUInt32BE(0) );

Windows 10 get DeviceFamilyVersion

I'm working on a Windows 10 (build 10240) Universal Windows app. When I use Windows.System.Profile.AnalyticsInfo.VersionInfo.DeviceFamilyVersion to get the device version, it returns a string "2814750438211605" instead of a version format (major.minor.revision.build).
Can anyone tell me what the string "2814750438211605" means?
The Windows 10 OS version value is located in this string property:
Windows.System.Profile.AnalyticsInfo.VersionInfo.DeviceFamilyVersion
It returns a string value like "2814750438211613".
To convert this long number to a readable format, use this:
string sv = AnalyticsInfo.VersionInfo.DeviceFamilyVersion;
ulong v = ulong.Parse(sv);
ulong v1 = (v & 0xFFFF000000000000L) >> 48;
ulong v2 = (v & 0x0000FFFF00000000L) >> 32;
ulong v3 = (v & 0x00000000FFFF0000L) >> 16;
ulong v4 = v & 0x000000000000FFFFL;
string version = $"{v1}.{v2}.{v3}.{v4}"; // == 10.0.10240.16413
Your application should treat this as opaque data and just log it "as is". It's a 64-bit decimal value as a string.
Remember that the intent of this API is to provide a log string from which you can reconstruct the OS version number for support/analytics. In your server-side analysis, you'd convert it if needed or just use it as a unique version identifier... If you are actually trying to parse it at runtime, then you are using it incorrectly and are quite likely to recreate the same problems that resulted in GetVersionEx and VerifyVersionInfo being deprecated in the first place.
Do not parse the string at runtime in your app. Just store it "as is". Remember that with Windows 10, a customer really has no idea what you mean if you ask "What version of Windows do you have?". The answer is "10" and will likely still be "10" for a long time to come.
If you found this question and like me you are looking for a way to do this in JavaScript, then you might find this useful.
getDeviceFamilyVersion() {
  let deviceFamilyVersion = Windows.System.Profile.AnalyticsInfo.versionInfo.deviceFamilyVersion;
  let deviceFamilyVersionDecimalFormat = parseInt(deviceFamilyVersion);
  if (isNaN(deviceFamilyVersionDecimalFormat)) {
    throw new Error('cannot parse device family version number');
  }
  let hexString = deviceFamilyVersionDecimalFormat.toString(16).toUpperCase();
  while (hexString.length !== 16) { // needed because JavaScript trims the leading zeros when converting to a hex string
    hexString = '0' + hexString;
  }
  let hexStringIterator = 0;
  let versionString = '';
  while (hexStringIterator < hexString.length) {
    let subHexString = hexString.substring(hexStringIterator, hexStringIterator + 4);
    let decimalValue = parseInt(subHexString, 16);
    versionString += decimalValue + '.';
    hexStringIterator += 4;
  }
  return versionString.substring(0, versionString.length - 1);
}
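Note that parseInt goes through a JavaScript Number, which is only exact up to 2^53 - 1; current DeviceFamilyVersion values fit, but if your environment has BigInt, a shorter sketch avoids the issue entirely (the helper name is mine):
function deviceFamilyVersionToString(deviceFamilyVersion) {
  const v = BigInt(deviceFamilyVersion); // the decimal string from AnalyticsInfo
  return [48n, 32n, 16n, 0n].map(shift => (v >> shift) & 0xFFFFn).join('.');
}
deviceFamilyVersionToString('2814750438211613'); // "10.0.10240.16413"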
Just a nifty way of doing this: I created an enum that is used to match predefined device families.
public enum DeviceFamily
{
    Unknown,
    Desktop,
    Tablet,
    Mobile,
    SurfaceHub,
    Xbox,
    Iot
}
This method will check and parse it into the enum.
var q = ResourceContext.GetForCurrentView().QualifierValues;
if (q.ContainsKey("DeviceFamily"))
{
    try
    {
        Enum.Parse(typeof(DeviceFamily), q["DeviceFamily"]);
        // send the user notification about the device family he is in
    }
    catch (Exception ex) { }
}

How do I reverse a String in Dart?

I have a String, and I would like to reverse it. For example, I am writing an AngularDart filter that reverses a string. It's just for demonstration purposes, but it made me wonder how I would reverse a string.
Example:
Hello, world
should turn into:
dlrow ,olleH
I should also consider strings with Unicode characters. For example: 'Ame\u{301}lie'
What's an easy way to reverse a string, even if it has Unicode characters?
The question is not well defined. Reversing arbitrary strings does not make sense and will lead to broken output. The first (surmountable) obstacle is UTF-16. Dart strings are encoded as UTF-16, and reversing just the code units leads to invalid strings:
var input = "Music \u{1d11e} for the win"; // Music 𝄞 for the win
print(input.split('').reversed.join()); // niw eht rof <broken surrogate pair> cisuM
The split function explicitly warns against this problem (with an example):
Splitting with an empty string pattern ('') splits at UTF-16 code unit boundaries and not at rune boundaries[.]
There is an easy fix for this: instead of reversing the individual code-units one can reverse the runes:
var input = "Music \u{1d11e} for the win"; // Music 𝄞 for the win
print(new String.fromCharCodes(input.runes.toList().reversed)); // niw eht rof 𝄞 cisuM
But that's not all. Runes, too, can have a specific order. This second obstacle is much harder to solve. A simple example:
var input = 'Ame\u{301}lie'; // Amélie
print(new String.fromCharCodes(input.runes.toList().reversed)); // eiĺemA
Note that the accent is on the wrong character.
There are probably other languages that are even more sensitive to the order of individual runes.
If the input has severe restrictions (for example being ASCII, or ISO Latin 1) then reversing strings is technically possible. However, I haven't yet seen a single use case where this operation made sense.
Using this question as example for showing that strings have List-like operations is not a good idea, either. Except for few use-cases, strings have to be treated with respect to a specific language, and with highly complex methods that have language-specific knowledge.
In particular native English speakers have to pay attention: strings can rarely be handled as if they were lists of single characters. In almost every other language this will lead to buggy programs. (And don't get me started on toLowerCase and toUpperCase ...).
Here's one way to reverse an ASCII String in Dart:
input.split('').reversed.join('');
split the string on every character, creating a List
generate an iterable that walks the list in reverse
join the list (creating a new string)
Note: this is not necessarily the fastest way to reverse a string. See other answers for alternatives.
Note: this does not properly handle all unicode strings.
I've made a small benchmark for a few different alternatives:
String reverse0(String s) {
  return s.split('').reversed.join('');
}
String reverse1(String s) {
  var sb = new StringBuffer();
  for (var i = s.length - 1; i >= 0; --i) {
    sb.write(s[i]);
  }
  return sb.toString();
}
String reverse2(String s) {
  return new String.fromCharCodes(s.codeUnits.reversed);
}
String reverse3(String s) {
  var sb = new StringBuffer();
  for (var i = s.length - 1; i >= 0; --i) {
    sb.writeCharCode(s.codeUnitAt(i));
  }
  return sb.toString();
}
String reverse4(String s) {
  var sb = new StringBuffer();
  var i = s.length - 1;
  while (i >= 3) {
    sb.writeCharCode(s.codeUnitAt(i - 0));
    sb.writeCharCode(s.codeUnitAt(i - 1));
    sb.writeCharCode(s.codeUnitAt(i - 2));
    sb.writeCharCode(s.codeUnitAt(i - 3));
    i -= 4;
  }
  while (i >= 0) {
    sb.writeCharCode(s.codeUnitAt(i));
    i -= 1;
  }
  return sb.toString();
}
String reverse5(String s) {
  var length = s.length;
  var charCodes = new List(length);
  for (var index = 0; index < length; index++) {
    charCodes[index] = s.codeUnitAt(length - index - 1);
  }
  return new String.fromCharCodes(charCodes);
}
main() {
  var s = "Lorem Ipsum is simply dummy text of the printing and typesetting industry.";
  time('reverse0', () => reverse0(s));
  time('reverse1', () => reverse1(s));
  time('reverse2', () => reverse2(s));
  time('reverse3', () => reverse3(s));
  time('reverse4', () => reverse4(s));
  time('reverse5', () => reverse5(s));
}
Here is the result:
reverse0: => 331,394 ops/sec (3 us) stdev(0.01363)
reverse1: => 346,822 ops/sec (3 us) stdev(0.00885)
reverse2: => 490,821 ops/sec (2 us) stdev(0.0338)
reverse3: => 873,636 ops/sec (1 us) stdev(0.03972)
reverse4: => 893,953 ops/sec (1 us) stdev(0.04089)
reverse5: => 2,624,282 ops/sec (0 us) stdev(0.11828)
Try this function
String reverse(String s) {
  var chars = s.splitChars();
  var len = s.length - 1;
  var i = 0;
  while (i < len) {
    var tmp = chars[i];
    chars[i] = chars[len];
    chars[len] = tmp;
    i++;
    len--;
  }
  return Strings.concatAll(chars);
}
void main() {
  var s = "Hello , world";
  print(s);
  print(reverse(s));
}
(or)
String reverse(String s) {
  StringBuffer sb = new StringBuffer();
  for (int i = s.length - 1; i >= 0; i--) {
    sb.add(s[i]);
  }
  return sb.toString();
}
main() {
  print(reverse('Hello , world'));
}
The library More Dart contains a light-weight wrapper around strings that makes them behave like an immutable list of characters:
import 'package:more/iterable.dart';
void main() {
  print(string('Hello World').reversed.join());
}
There is a utils package that covers this function. It has some more nice methods for operating on strings.
Install it with:
dependencies:
  basic_utils: ^1.2.0
Usage :
String reversed = StringUtils.reverse("helloworld");
Github:
https://github.com/Ephenodrom/Dart-Basic-Utils
Here is a function you can use to reverse strings. It takes a string as input and uses a Dart package called characters (which provides the Characters class) to extract the characters from the given string. Then we can reverse them and join them again to make the reversed string.
String reverse(String string) {
  if (string.length < 2) {
    return string;
  }
  final characters = Characters(string);
  return characters.toList().reversed.join();
}
Create this extension:
extension Ex on String {
  String get reverse => split('').reversed.join();
}
Usage:
void main() {
  String string = 'Hello World';
  print(string.reverse); // dlroW olleH
}