Convert extended precision float (80-bit) to double (64-bit) in MSVC - visual-c++

What is the most portable and "right" way to convert an extended precision float (an 80-bit value, known as long double in some compilers) to a double (64-bit) in MSVC, for both Win32 and Win64?
MSVC currently (as of 2010) treats long double as a synonym for double.
I could probably write an fld/fstp assembler pair in inline asm, but inline asm is not available for Win64 code in MSVC. Do I need to move this assembler code to a separate .asm file? Is it really the case that there is no better solution?

Just did this in x86 code...
.686P
.XMM
_TEXT SEGMENT
EXTRN __fltused:DWORD
PUBLIC _cvt80to64
PUBLIC _cvt64to80
_cvt80to64 PROC                  ; double cvt80to64(const void *src80)
mov eax, dword ptr [esp+4]       ; eax = pointer to the 80-bit value
fld TBYTE PTR [eax]              ; load it; the result is returned in ST(0)
ret 0
_cvt80to64 ENDP
_cvt64to80 PROC                  ; void cvt64to80(double value, void *dst80)
mov eax, DWORD PTR [esp+12]      ; eax = destination pointer
fld QWORD PTR [esp+4]            ; load the double argument
fstp TBYTE PTR [eax]             ; store it as an 80-bit extended value
ret 0
_cvt64to80 ENDP
_TEXT ENDS
END
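For completeness, here is a hedged sketch of how these routines might be declared and called from C++; the prototypes are my reading of the stack offsets above, and note that MSVC's x86 cdecl convention prepends the leading underscore, so the C-level names drop it:
extern "C" double cvt80to64(const void *src80);          /* result comes back in ST(0) */
extern "C" void   cvt64to80(double value, void *dst80);  /* writes 10 bytes to dst80 */

/* usage sketch */
unsigned char ext[10];
cvt64to80(1.5, ext);        /* 1.5 as 80-bit extended */
double d = cvt80to64(ext);  /* and back to double */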

If your compiler / platform doesn't have native support for 80-bit floating point values, you have to decode the value yourself.
Assuming that the 80-bit float is stored in a byte buffer at a specific offset, you can do it like this:
float64 C_IOHandler::readFloat80(IColl<uint8> buffer, uint32 *ref_offset)
{
uint32 &offset = *ref_offset;
//80 bit floating point value according to the IEEE-754 specification and the Standard Apple Numeric Environment specification:
//1 bit sign, 15 bit exponent, 1 bit normalization indication, 63 bit mantissa
float64 sign;
if ((buffer[offset] & 0x80) == 0x00)
sign = 1;
else
sign = -1;
uint32 exponent = (((uint32)buffer[offset] & 0x7F) << 8) | (uint32)buffer[offset + 1];
uint64 mantissa = readUInt64BE(buffer, offset + 2);
//If the highest bit of the mantissa is set, then this is a normalized number.
float64 normalizeCorrection;
if ((mantissa & 0x8000000000000000) != 0x00)
normalizeCorrection = 1;
else
normalizeCorrection = 0;
mantissa &= 0x7FFFFFFFFFFFFFFF;
offset += 10;
//value = (-1) ^ s * (normalizeCorrection + m / 2 ^ 63) * 2 ^ (e - 16383)
return (sign * (normalizeCorrection + (float64)mantissa / ((uint64)1 << 63)) * g_Math->toPower(2, (int32)exponent - 16383));
}
This is how I did it, and it compiles fine with g++ 4.5.0. It of course isn't a very fast solution, but it is at least a functional one. This code should also be portable across platforms, though I haven't tried.
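For comparison, here is a self-contained sketch of the same decoding that uses only standard C++ (a plain byte pointer and ldexp from <cmath>); the function name and buffer convention (big-endian bytes, as the answer above reads them) are my own choices, and like the code above it does not special-case infinities, NaNs or denormals:
#include <cmath>
#include <cstdint>

double readFloat80BE(const unsigned char *b)
{
    double sign = (b[0] & 0x80) ? -1.0 : 1.0;
    int exponent = ((b[0] & 0x7F) << 8) | b[1];
    std::uint64_t mantissa = 0;
    for (int i = 2; i < 10; ++i)                      // 8 mantissa bytes, big-endian
        mantissa = (mantissa << 8) | b[i];
    double normalizeCorrection = (mantissa >> 63) ? 1.0 : 0.0;   // explicit integer bit
    double fraction = double(mantissa & 0x7FFFFFFFFFFFFFFFull) / 9223372036854775808.0; // / 2^63
    // value = (-1)^s * (normalizeCorrection + m / 2^63) * 2^(e - 16383)
    return sign * std::ldexp(normalizeCorrection + fraction, exponent - 16383);
}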

I've just written this one. It constructs an IEEE double from an IEEE extended precision number using bit operations. It takes the 10-byte extended precision number in little-endian format:
#include <stdio.h>  /* printf */
#include <string.h> /* memcpy, memset */
typedef unsigned long long uint64;
double makeDoubleFromExtended(const unsigned char x[10])
{
int exponent = (((x[9] << 8) | x[8]) & 0x7FFF);
uint64 mantissa =
((uint64)x[7] << 56) | ((uint64)x[6] << 48) | ((uint64)x[5] << 40) | ((uint64)x[4] << 32) |
((uint64)x[3] << 24) | ((uint64)x[2] << 16) | ((uint64)x[1] << 8) | (uint64)x[0];
unsigned char d[8] = {0};
double result;
d[7] = x[9] & 0x80; /* Set sign. */
if ((exponent == 0x7FFF) || (exponent == 0))
{
/* Infinite, NaN or denormal */
if (exponent == 0x7FFF)
{
/* Infinite or NaN */
d[7] |= 0x7F;
d[6] = 0xF0;
}
else
{
/* Otherwise it's denormal. It cannot be represented as double. Translate as signed zero. */
memcpy(&result, d, 8);
return result;
}
}
else
{
/* Normal number. */
exponent = exponent - 0x3FFF + 0x03FF; /*< exponent for double precision. */
if (exponent <= -52) /*< Too small to represent. Translate as (signed) zero. */
{
memcpy(&result, d, 8);
return result;
}
else if (exponent < 0)
{
/* Denormal, exponent bits are already zero here. */
}
else if (exponent >= 0x7FF) /*< Too large to represent. Translate as infinite. */
{
d[7] |= 0x7F;
d[6] = 0xF0;
memset(d, 0x00, 6);
memcpy(&result, d, 8);
return result;
}
else
{
/* Representable number */
d[7] |= (exponent & 0x7F0) >> 4;
d[6] |= (exponent & 0xF) << 4;
}
}
/* Translate mantissa. */
mantissa >>= 11;
if (exponent < 0)
{
/* Denormal, further shifting is required here. */
mantissa >>= (-exponent + 1);
}
d[0] = mantissa & 0xFF;
d[1] = (mantissa >> 8) & 0xFF;
d[2] = (mantissa >> 16) & 0xFF;
d[3] = (mantissa >> 24) & 0xFF;
d[4] = (mantissa >> 32) & 0xFF;
d[5] = (mantissa >> 40) & 0xFF;
d[6] |= (mantissa >> 48) & 0x0F;
memcpy(&result, d, 8);
printf("Result: 0x%016llx", *(uint64*)(&result) );
return result;
}
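A quick usage sketch (my own test value): the byte pattern below is 1.0 in little-endian extended format, i.e. mantissa 0x8000000000000000 with biased exponent 0x3FFF:
const unsigned char one80[10] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xFF, 0x3F};
double d = makeDoubleFromExtended(one80);  /* yields 1.0 */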

Played with the given answers and ended up with this.
#include <cmath>
#include <limits>
#include <cassert>
#ifndef _M_X64
__inline __declspec(naked) double _cvt80to64(void* ) {
__asm {
// PUBLIC _cvt80to64 PROC
mov eax, dword ptr [esp+4]
fld TBYTE PTR [eax]
ret 0
// _cvt80to64 ENDP
}
}
#endif
#pragma pack(push)
#pragma pack(2)
typedef unsigned char tDouble80[10];
#pragma pack(pop)
typedef struct {
unsigned __int64 mantissa:64;
unsigned int exponent:15;
unsigned int sign:1;
} tDouble80Struct;
inline double convertDouble80(const tDouble80& val)
{
assert(10 == sizeof(tDouble80));
const tDouble80Struct* valStruct = reinterpret_cast<const tDouble80Struct*>(&val);
const unsigned int mask_exponent = (1 << 15) - 1;
const unsigned __int64 mantissa_high_highestbit = (unsigned __int64)1 << 63;
const unsigned __int64 mask_mantissa = ((unsigned __int64)1 << 63) - 1;
if (mask_exponent == valStruct->exponent) {
if(0 == valStruct->mantissa) {
return (0 != valStruct->sign) ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity();
}
// highest mantissa bit set means quiet NaN
return (0 != (mantissa_high_highestbit & valStruct->mantissa)) ? std::numeric_limits<double>::quiet_NaN() : std::numeric_limits<double>::signaling_NaN();
}
// 80 bit floating point value according to the IEEE-754 specification and
// the Standard Apple Numeric Environment specification:
// 1 bit sign, 15 bit exponent, 1 bit normalization indication, 63 bit mantissa
const double sign(valStruct->sign ? -1 : 1);
//If the highest bit of the mantissa is set, then this is a normalized number.
unsigned __int64 mantissa = valStruct->mantissa;
double normalizeCorrection = (mantissa & mantissa_high_highestbit) != 0 ? 1 : 0;
mantissa &= mask_mantissa;
//value = (-1) ^ s * (normalizeCorrection + m / 2 ^ 63) * 2 ^ (e - 16383)
return (sign * (normalizeCorrection + double(mantissa) / mantissa_high_highestbit) * pow(2.0, int(valStruct->exponent) - 16383));
}
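Usage sketch, with the same 1.0 bit pattern as in the previous answer (little-endian, my own test value):
tDouble80 one = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xFF, 0x3F};
double d = convertDouble80(one); // 1.0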

Related

Best data type and rounding function for weight and currency variables

I need to multiply two values - weight and currency (Visual C++, MFC). E.g.:
a=11.121;
b=12.11;
c=a*b;
Next I have to round "c" to 2 digits after the decimal point (a currency value, e.g. 134.68). What are the best data types and rounding function for these variables? The rounding procedure must be mathematically correct.
P.S. The problem was solved by a very ugly but working piece of code:
CString GetPriceSum(CString weight,CString price)
{
price.Replace(".", "");
price = price + "0";
if (weight.Find(".") == -1) { weight = weight + ".000"; }
weight.Replace(".", "");
unsigned long long int iprice = atoi(price);
unsigned long long int iweight = atoi(weight);
unsigned long long int isum = iprice * iweight;
CString sum = ""; sum.Format("%llu", isum);
CString r1 = sum.Right(1);
if (atoi(r1) >= 5) { isum += 10; }
CString r2 = sum.Mid(sum.GetLength() - 2, 1);
if (atoi(r2) >= 5) { isum += 100; sum.Format("%llu", isum);}
r2 = sum.Mid(sum.GetLength() - 3, 1);
if (atoi(r2) >= 5) { isum += 1000; sum.Format("%llu", isum);}
r2 = sum.Mid(sum.GetLength() - 4, 1);
if (atoi(r2) >= 5) { isum += 10000; sum.Format("%llu", isum);}
CString finsum = ""; finsum.Format("%llu", isum);
finsum.Insert(finsum.GetLength() - 6, ".");
finsum.Delete(finsum.GetLength() - 4, 4);
if (finsum.Left(1) == ".") { finsum = "0" + finsum; }
return finsum;
}
How about this: let's start from what you said in your other question:
"The API I use computes the values using some other language, and it rounds the values in a mathematically correct way."
In your other question, you got those values as strings. You can construct an integer from those digits (remove the decimal point). Assuming that the product fits in a 64-bit int, you can multiply the numbers exactly. Then you can manually round to the desired precision and drop the unneeded digits.
Code example (you may want to add error checking):
#define _CRT_SECURE_NO_WARNINGS
#include <string>
#include <iostream>
#include <sstream>
int main()
{
std::string a = "40.50";
std::string b = "0.490";
long long l1, dec1, l2, dec2;
sscanf(a.data(), "%lld.%lld", &l1, &dec1);
l1 = l1 * 100 + dec1;
sscanf(b.data(), "%lld.%lld", &l2, &dec2);
l2 = l2 * 1000 + dec2;
long long r = l1 * l2;
r /= 100;
int rem = r % 10;
r /= 10;
if (rem >= 5)
r++;
std::stringstream ss;
ss << r / 100 << "." << std::setw(2) << std::setfill('0') << r % 100;
std::cout << ss.str();
}
You can also use stringstream instead of sscanf to parse the strings.
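A hedged sketch of that alternative, using the same fixed-point idea (parseScaled is my own helper name, and it assumes non-negative inputs):
#include <sstream>
#include <string>

// Parse a decimal string such as "40.50" into an integer scaled by the number
// of fractional digits; *scale receives 10^(digits after the point).
long long parseScaled(const std::string &s, long long *scale)
{
    std::istringstream in(s);
    long long whole = 0;
    in >> whole;
    *scale = 1;
    long long frac = 0;
    if (in.peek() == '.') {
        in.get();                                  // skip the '.'
        for (int c; (c = in.get()) != EOF && c >= '0' && c <= '9'; ) {
            frac = frac * 10 + (c - '0');
            *scale *= 10;
        }
    }
    return whole * *scale + frac;                  // e.g. "40.50" -> 4050, scale 100
}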

I2C communication using MSP430G2553 and HMC5883L

I am trying to interface my HMC5883L magnetometer with my MSP430G2553 to get continuous readings on the x, y and z axes to provide orientation for a device.
I am having problems getting data over I2C. I am trying to read the x, y and z data but I am not getting anything from my code. I would appreciate any feedback; I am new to I2C communication.
I found some code online and have been using and modifying it. Here is the code:
main code:
#include <msp430g2553.h>
#include "my_types.h"
#include "i2c.h"
#include <math.h> /* atan2 */
#ifndef M_PI
#define M_PI 3.14159265358979323846264338327950288
#endif
int TXByteCtr;
unsigned char PRxData;
int Rx = 0;
char WHO_AM_I = 0x1E;
char itgAddress = 0x69;
const uchar setup_hmc5883[] = {0x00, 0x70};
const uchar gain_hmc5883[] = {0x01, 0xA0};
const uchar continuous_hmc5883[] = {0x02, 0x00};
const uchar base_hmc5883[] = {0x03};
char readBuffer [6];
void init_I2C(void);
void Transmit(void);
void Receive(void);
int main(void)
{
WDTCTL = WDTPW + WDTHOLD; // Stop WDT
int x, y, z ;
float heading ;
float headingDegrees ;
P1SEL |= BIT6 + BIT7; // Assign I2C pins to USCI_B0
P1SEL2|= BIT6 + BIT7; // Assign I2C pins to USCI_B0
BCSCTL1 = CALBC1_16MHZ ;
DCOCTL = CALDCO_16MHZ ;
init_I2C();
Rx = 0;
TXByteCtr = 1;
writei2c(0x3C,setup_hmc5883,2);
TXByteCtr = 1;
writei2c(0x3C,gain_hmc5883,2);
TXByteCtr = 1;
writei2c(0x3C,continuous_hmc5883,2);
while(1){
writei2c(0x3C, base_hmc5883, 1);
__delay_cycles(100000);
readi2c(0X3D, readBuffer, 6);
x = readBuffer[0] ;
x |= readBuffer[1] << 8 ;
z = readBuffer[2] ;
z |= readBuffer[3] << 8 ;
y = readBuffer[4] ;
y |= readBuffer[5] << 8 ;
heading = atan2(y, x);
if(heading < 0) heading += 2*M_PI;
headingDegrees = heading * 180/M_PI;
}
}
//-------------------------------------------------------------------------------
// The USCI_B0 data ISR is used to move received data from the I2C slave
// to the MSP430 memory. It is structured such that it can be used to receive
//-------------------------------------------------------------------------------
#pragma vector = USCIAB0TX_VECTOR
__interrupt void USCIAB0TX_ISR(void)
{
if(Rx == 1){ // Master Receive?
PRxData = UCB0RXBUF; // Get RX data
__bic_SR_register_on_exit(CPUOFF); // Exit LPM0
}
else{ // Master Transmit
if (TXByteCtr) // Check TX byte counter
{
if ((IFG2 & UCB0TXIFG) || (IFG2 & UCB0RXIFG))
i2cDataInterruptService();
TXByteCtr--; // Decrement TX byte counter
if(!TXByteCtr)
{
UCB0CTL1 |= UCTXSTP; // I2C stop condition
IFG2 &= ~UCB0TXIFG; // Clear USCI_B0 TX int flag
__bic_SR_register_on_exit(CPUOFF); // Exit LPM0
}
}
}
}
void init_I2C(void) {
UCB0CTL1 |= UCSWRST; // Enable SW reset
UCB0CTL0 = UCMST + UCMODE_3 + UCSYNC; // I2C Master, synchronous mode
UCB0CTL1 = UCSSEL_2 + UCSWRST; // Use SMCLK, keep SW reset
UCB0BR0 = 160; // fSCL = SMCLK/160 = ~100kHz (SMCLK is calibrated to 16MHz above; /12 would give ~1.33MHz)
UCB0BR1 = 0;
UCB0I2CSA = itgAddress; // Slave Address is 069h
UCB0CTL1 &= ~UCSWRST; // Clear SW reset, resume operation
IE2 |= UCB0RXIE + UCB0TXIE; //Enable RX and TX interrupt
}
i2c.c:
#include "i2c.h"
volatile char i2cBusy = 0;
volatile unsigned char i2cBufferLength = 0 ;
unsigned char * i2cBufferPtr ;
volatile char txDone = 1 ;
volatile char rxDone = 1 ;
void initi2c(unsigned int divider){
P1SEL |= BIT6 + BIT7 ;
P1SEL2 |= BIT6 + BIT7 ;
UCB0CTL1 |= UCSWRST ;
UCB0CTL0 = UCMST + UCMODE_3 + UCSYNC ;
UCB0CTL1 = UCSSEL_2 + UCSWRST ;
UCB0BR0 = divider & 0x00FF ;
UCB0BR1 = ((divider & 0xFF00) >> 8) ;
UCB0I2CSA = 0x069 ;
UCB0I2CIE = UCNACKIE | UCALIE;
UCB0CTL1 &= ~UCSWRST ;
IE2 |= UCB0TXIE | UCB0RXIE ;
}
char writei2c(unsigned char addr, unsigned char * data, unsigned char nbData){
UCB0I2CSA = addr ;
i2cBusy = 0 ;
i2cBufferPtr = data ;
i2cBufferLength = nbData ;
txDone = 0 ;
UCB0CTL1 |= UCTR + UCTXSTT;
__bis_SR_register(CPUOFF + GIE); // Enter LPM0 w/ interrupts
//while(txDone == 0) ;
//return txDone ;
}
char readi2c(unsigned char addr, unsigned char * data, unsigned char nbData){
UCB0I2CSA = addr ;
i2cBusy = 0 ;
i2cBufferPtr = data ;
i2cBufferLength = nbData ;
rxDone = 0 ;
UCB0CTL1 &= ~UCTR;
UCB0CTL1 |= UCTXSTT;
while(UCB0CTL1 & UCTXSTT);
UCB0CTL1 |= UCTXSTP ;
__bis_SR_register(CPUOFF + GIE); // Enter LPM0 w/ interrupts
while(rxDone == 0) ;
return rxDone ;
}
inline void i2cDataInterruptService(void){
if ((IFG2 & UCB0TXIFG) != 0 || (IFG2 & UCB0RXIFG) != 0){
if((UCB0CTL1 & UCTR) != 0){
if(i2cBusy >= i2cBufferLength){
UCB0CTL1 |= UCTXSTP ;
i2cBusy = 0 ;
txDone = 1 ;
IFG2 &= ~UCB0TXIFG;
}else{
UCB0TXBUF = i2cBufferPtr[i2cBusy] ;
i2cBusy ++ ;
}
}else{
if((i2cBufferLength - i2cBusy) == 1){ // may generate a repeated stop condition
UCB0CTL1 |= UCTXSTP ;
}
i2cBufferPtr[i2cBusy] = UCB0RXBUF;
i2cBusy ++ ;
if(i2cBusy >= i2cBufferLength){
i2cBusy = 0 ;
rxDone = 1 ;
}
}
}
}
inline void i2cErrorInterruptService(void){
if ((UCB0STAT & UCNACKIFG) != 0) {
UCB0CTL1 |= UCTXSTP;
UCB0STAT &= ~UCNACKIFG;
i2cBusy = -1;
txDone = -1 ;
rxDone = -1 ;
}else if ((UCB0STAT & UCALIE) != 0) {
UCB0CTL1 |= UCTXSTP;
UCB0STAT &= ~UCALIE;
i2cBusy = -1;
txDone = -1 ;
rxDone = -1 ;
}
}

Generate Checksum for String

I would like to generate checksums for strings/data, with these requirements:
1. The same data should always produce the same checksum.
2. Two different data strings should not produce the same checksum; a random collision rate of 0.1% is negligible.
3. No encryption/decryption of the data.
4. The checksum length need not be huge, and it may contain letters and digits.
5. It must be very fast and efficient. Generating checksums for 100 MB of text data should take less than 5 minutes; generating 1000 checksums over segments of less than 1 KB each should take less than 10 seconds.
Any algorithm or implementation reference and suggestions are most appreciated.
You can write a custom hash function (C++):
#include <string>

long long hash(const std::string &s){
    long long k = 7;
    for(size_t i = 0; i < s.length(); i++){
        k *= 23;
        k += s[i];
        k *= 13;
        k %= 1000000009;
    }
    return k;
}
This should give you a reasonably well-distributed (collision-free for most inputs) hash value.
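A quick usage sketch (my own main) illustrating the first requirement, that equal data gives equal checksums:
#include <iostream>

int main() {
    std::cout << hash("the same data") << "\n";
    std::cout << hash("the same data") << "\n";   // identical value
    std::cout << hash("different data") << "\n";  // (almost certainly) different value
}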
A very common, fast checksum is the CRC-32, a 32-bit polynomial cyclic redundancy check. Here are three implementations in C, which vary in speed vs. complexity, of the CRC-32: (This is from http://www.hackersdelight.org/hdcodetxt/crc.c.txt)
#include <stdio.h>
#include <stdlib.h>
// ---------------------------- reverse --------------------------------
// Reverses (reflects) bits in a 32-bit word.
unsigned reverse(unsigned x) {
x = ((x & 0x55555555) << 1) | ((x >> 1) & 0x55555555);
x = ((x & 0x33333333) << 2) | ((x >> 2) & 0x33333333);
x = ((x & 0x0F0F0F0F) << 4) | ((x >> 4) & 0x0F0F0F0F);
x = (x << 24) | ((x & 0xFF00) << 8) |
((x >> 8) & 0xFF00) | (x >> 24);
return x;
}
// ----------------------------- crc32a --------------------------------
/* This is the basic CRC algorithm with no optimizations. It follows the
logic circuit as closely as possible. */
unsigned int crc32a(unsigned char *message) {
int i, j;
unsigned int byte, crc;
i = 0;
crc = 0xFFFFFFFF;
while (message[i] != 0) {
byte = message[i]; // Get next byte.
byte = reverse(byte); // 32-bit reversal.
for (j = 0; j <= 7; j++) { // Do eight times.
if ((int)(crc ^ byte) < 0)
crc = (crc << 1) ^ 0x04C11DB7;
else crc = crc << 1;
byte = byte << 1; // Ready next msg bit.
}
i = i + 1;
}
return reverse(~crc);
}
// ----------------------------- crc32b --------------------------------
/* This is the basic CRC-32 calculation with some optimization but no
table lookup. The byte reversal is avoided by shifting the crc reg
right instead of left and by using a reversed 32-bit word to represent
the polynomial.
When compiled to Cyclops with GCC, this function executes in 8 + 72n
instructions, where n is the number of bytes in the input message. It
should be doable in 4 + 61n instructions.
If the inner loop is strung out (approx. 5*8 = 40 instructions),
it would take about 6 + 46n instructions. */
unsigned int crc32b(unsigned char *message) {
int i, j;
unsigned int byte, crc, mask;
i = 0;
crc = 0xFFFFFFFF;
while (message[i] != 0) {
byte = message[i]; // Get next byte.
crc = crc ^ byte;
for (j = 7; j >= 0; j--) { // Do eight times.
mask = -(crc & 1);
crc = (crc >> 1) ^ (0xEDB88320 & mask);
}
i = i + 1;
}
return ~crc;
}
// ----------------------------- crc32c --------------------------------
/* This is derived from crc32b but does table lookup. First the table
itself is calculated, if it has not yet been set up.
Not counting the table setup (which would probably be a separate
function), when compiled to Cyclops with GCC, this function executes in
7 + 13n instructions, where n is the number of bytes in the input
message. It should be doable in 4 + 9n instructions. In any case, two
of the 13 or 9 instructions are load byte.
This is Figure 14-7 in the text. */
unsigned int crc32c(unsigned char *message) {
int i, j;
unsigned int byte, crc, mask;
static unsigned int table[256];
/* Set up the table, if necessary. */
if (table[1] == 0) {
for (byte = 0; byte <= 255; byte++) {
crc = byte;
for (j = 7; j >= 0; j--) { // Do eight times.
mask = -(crc & 1);
crc = (crc >> 1) ^ (0xEDB88320 & mask);
}
table[byte] = crc;
}
}
/* Through with table setup, now calculate the CRC. */
i = 0;
crc = 0xFFFFFFFF;
while ((byte = message[i]) != 0) {
crc = (crc >> 8) ^ table[(crc ^ byte) & 0xFF];
i = i + 1;
}
return ~crc;
}
If you simply google "CRC32", you will get more info than you could possibly absorb.
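As a quick sanity check, here is a minimal driver (my own main; 0xCBF43926 is the well-known CRC-32 check value for the ASCII string "123456789"):
int main(void) {
    printf("%08x\n", crc32c((unsigned char *)"123456789"));  /* expect cbf43926 */
    return 0;
}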

Two questions with base64 encoding

I am confused about how to convert a const char * to base64, and I have two questions.
Question #1: How do I determine the length of the output string so that it exactly fits the base64 output? I have found code from the Apple open source site: http://www.opensource.apple.com/source/QuickTimeStreamingServer/QuickTimeStreamingServer-452/CommonUtilitiesLib/base64.c
Alternatively I could directly use "atlenc.h" in VC++. If the coded_dst buffer I define is smaller than what is actually needed, the program may crash:
int Base64encode(char *coded_dst, const char *plain_src, int len_plain_src)
{
const char basis_64[] ="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
int i;
char *p;
p = coded_dst;
for (i = 0; i < len_plain_src - 2; i += 3) {
*p++ = basis_64[(plain_src[i] >> 2) & 0x3F];
*p++ = basis_64[((plain_src[i] & 0x3) << 4) |
((int) (plain_src[i + 1] & 0xF0) >> 4)];
*p++ = basis_64[((plain_src[i + 1] & 0xF) << 2) |
((int) (plain_src[i + 2] & 0xC0) >> 6)];
*p++ = basis_64[plain_src[i + 2] & 0x3F];
}
if (i < len_plain_src) {
*p++ = basis_64[(plain_src[i] >> 2) & 0x3F];
if (i == (len_plain_src - 1)) {
*p++ = basis_64[((plain_src[i] & 0x3) << 4)];
*p++ = '=';
}
else {
*p++ = basis_64[((plain_src[i] & 0x3) << 4) |
((int) (plain_src[i + 1] & 0xF0) >> 4)];
*p++ = basis_64[((plain_src[i + 1] & 0xF) << 2)];
}
*p++ = '=';
}
*p++ = '\0';
return p - coded_dst;
}
Question #2: As we all know, the byte type in C++ is unsigned char; how do I convert the char * to an unsigned char *?
The design of your function, based on the signature, tells me it's up to the caller to provide a sufficient buffer for output. This would be unsafe in your example because the caller isn't informing the function how large that buffer is. Your function has no chance to limit output to coded_dst to the buffer provided, so you should add, at the least, a parameter for that.
As such, you would need to check as you loop to be sure p, a pointer into coded_dst, stays within that limit, returning an error to the caller if there's insufficient room.
That said, notice how many increments of p occur for every 3 source bytes processed: for every 3 bytes that go into that loop, 4 come out. So, to start the calculation of the required length, begin with
( len_plain_src / 3 ) * 4;
Now consider r = len_plain_src % 3. If r is zero, the loop has already consumed everything and nothing more is added; if r is nonzero, the code emits one final 4-character group (the remaining data plus '=' padding).
After that, you append a zero terminator, so add one more byte.
Look carefully at the closing '=' handling for the case where (i < len_plain_src): for one leftover byte the code emits two '=' characters and for two leftover bytes it emits one, which matches standard base64 padding.
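Putting the length calculation together, a minimal sketch (the helper name is mine; the original Apple/APR base64.c ships a similar Base64encode_len):
/* Required size of coded_dst, including the trailing '\0'. */
int Base64encode_len(int len_plain_src)
{
    int n = (len_plain_src / 3) * 4;
    if (len_plain_src % 3 != 0)
        n += 4;        /* one final group including '=' padding */
    return n + 1;      /* zero terminator */
}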
Now, to handle the unsigned char issue, you could change the declaration and initial assignment of p to
unsigned char *p = (unsigned char *) coded_dst;
at which point it would also be more convenient to declare basis_64 as unsigned char.

UTF-8 to Latin(ISO-8859-1) Conversion in C++

I would like to know how to write code that performs a UTF-8 to Latin(ISO-8859-1) Conversion in C++.
The following website does the conversion required:
http://www.unicodetools.com/unicode/utf8-to-latin-converter.php
Inserting value: úsername
provides the result: úsername
I've got a piece of code from a previous post that does a similar job, but it doesn't seem to convert the string:
int utf8_to_unicode(std::deque<int> &coded)
{
int charcode = 0;
int t = coded.front();
coded.pop_front();
if (t < 128)
{
return t;
}
int high_bit_mask = (1 << 6) -1;
int high_bit_shift = 0;
int total_bits = 0;
const int other_bits = 6;
while((t & 0xC0) == 0xC0)
{
t <<= 1;
t &= 0xff;
total_bits += 6;
high_bit_mask >>= 1;
high_bit_shift++;
charcode <<= other_bits;
charcode |= coded.front() & ((1 << other_bits)-1);
coded.pop_front();
}
charcode |= ((t >> high_bit_shift) & high_bit_mask) << total_bits;
return charcode;
}
Help please!
You need the iconv(3) function from libiconv. The first argument to the iconv conversion function (an iconv_t conversion descriptor) should be obtained with iconv_open(3) at program initialization, probably with
ic = iconv_open("ISO-8859-1","UTF-8");
(where ic is some static or global iconv_t variable).
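A minimal sketch of a complete conversion routine built on that (the function name and the reduced error handling are mine; unmappable characters simply make iconv fail):
#include <iconv.h>
#include <string>
#include <stdexcept>

std::string utf8_to_latin1(const std::string &in)
{
    iconv_t ic = iconv_open("ISO-8859-1", "UTF-8");
    if (ic == (iconv_t)-1)
        throw std::runtime_error("iconv_open failed");

    std::string out(in.size(), '\0');    // Latin-1 output is never longer than the UTF-8 input
    char *inp = const_cast<char *>(in.data());
    size_t inleft = in.size();
    char *outp = &out[0];
    size_t outleft = out.size();

    size_t rc = iconv(ic, &inp, &inleft, &outp, &outleft);
    iconv_close(ic);
    if (rc == (size_t)-1)
        throw std::runtime_error("UTF-8 to ISO-8859-1 conversion failed");

    out.resize(out.size() - outleft);    // trim to the bytes actually written
    return out;
}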
