error #29 Expected an expression in C - bluetooth

Code snippet:

#define ATT_UUID_SIZE 16
#define ATT_BT_UUID_SIZE 2

typedef unsigned char uint8;
typedef unsigned short uint16;   /* needed for the handles below */

typedef struct
{
    uint8 len;                  //!< Length of UUID (2 or 16)
    uint8 uuid[ATT_UUID_SIZE];  //!< 16 or 128 bit UUID
} attAttrType_t;

typedef struct
{
    uint16 startHandle;
    uint16 endHandle;
    attAttrType_t type;
} attReadByTypeReq_t;

attReadByTypeReq_t req;
req.startHandle = svcStartHdl;
req.endHandle = svcEndHdl;
req.type.len = ATT_BT_UUID_SIZE;
req.type.uuid[ATT_BT_UUID_SIZE] = {0xAD, 0x2B};

=> It reports error #29 ("expected an expression") when I assign 0xAD and 0x2B as the UUID. What could I be doing wrong?

req.type.uuid[ATT_BT_UUID_SIZE]={0xAD,0x2B};
This line is not correct. uuid is an array, and an array cannot be assigned to with = after its definition; req.type.uuid[ATT_BT_UUID_SIZE] also names a single element (one past the last index used for a 2-byte UUID), not the whole array. Do it like this:
req.type.uuid[0] = 0xAD;
req.type.uuid[1] = 0x2B;
or like this:
memcpy(req.type.uuid, YOUR_VALUE, ATT_BT_UUID_SIZE);
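For completeness, a minimal sketch of filling in the whole request this way (the helper name is just for illustration; svcStartHdl and svcEndHdl come from the surrounding discovery code, and the two bytes 0xAD, 0x2B are taken to be the 16-bit UUID in the little-endian order ATT expects):

#include <string.h>   /* memcpy */

static void build_read_by_type_req(attReadByTypeReq_t *req,
                                   uint16 svcStartHdl, uint16 svcEndHdl)
{
    /* 16-bit UUID, little-endian byte order. */
    static const uint8 uuid16[ATT_BT_UUID_SIZE] = { 0xAD, 0x2B };

    req->startHandle = svcStartHdl;
    req->endHandle   = svcEndHdl;
    req->type.len    = ATT_BT_UUID_SIZE;
    memcpy(req->type.uuid, uuid16, ATT_BT_UUID_SIZE);
}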

Related

Strange byte alignment between windows and Linux gcc compiler

I see strange behavior when reading a bin file and mapping it into a structure on Windows compared to the Linux gcc compiler.
Below is my C code:
#include <inttypes.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>     /* read(), lseek(), close() */

#define IV_MAX_LEN          32
#define HASH_MAX_LEN        64
#define MAX_NUM_IMGS        8
#define MAX_NUM_SRK_RECORDS 4
#define SKIP_OFFSET_1K      0x400

typedef struct {
    uint8_t  version;
    uint16_t length;
    uint8_t  tag;
    uint16_t srk_table_offset;
    uint16_t cert_offset;
    uint16_t blob_offset;
    uint16_t signature_offset;
    uint32_t reserved;
} __attribute__((packed)) blk_hdr_t;

typedef struct {
    uint32_t offset;
    uint32_t size;
    uint64_t dst;
    uint64_t entry;
    uint32_t hab_flags;
    uint32_t meta;
    uint8_t  hash[HASH_MAX_LEN];
    uint8_t  iv[IV_MAX_LEN];
} __attribute__((packed)) boot_t;

typedef struct {
    uint8_t   version;
    uint16_t  length;
    uint8_t   tag;
    uint32_t  flags;
    uint16_t  sw_version;
    uint8_t   fuse_version;
    uint8_t   num_images;
    uint16_t  sig_blk_offset;
    uint16_t  reserved;
    boot_t    img[MAX_NUM_IMGS];
    blk_hdr_t blk_hdr;
    uint32_t  sigblk_size;
    uint32_t  padding;
} __attribute__((packed)) headerMain_t;

int main(void)
{
    int ofd = 1;
    char filename[] = "sample.bin";
    int readSizes = 0;
    int headerSizes = 0;
    headerMain_t header;
    uint8_t *byte;

    ofd = open(filename, O_RDONLY);
    printf("\nOPENING File: %s!\n", filename);
    printf("\n STRUCT sizes: %lu, %lu, %lu,\n",
           (unsigned long)sizeof(headerMain_t),
           (unsigned long)sizeof(boot_t),
           (unsigned long)sizeof(blk_hdr_t));

#if 0
    if (lseek(ofd, SKIP_OFFSET_1K, SEEK_SET) < 0) {
        printf("Error Read \n");
    }
#endif

    readSizes = read(ofd, &header, sizeof(header));
    headerSizes = sizeof(header);
    printf("\n Read SIZE: %d / %d !", readSizes, headerSizes);
    printf("\n Read Bytes: \n");

    /* Dump the raw bytes that landed in the struct. */
    byte = (uint8_t *)&header;
    for (int i = 0; i < readSizes; i++) {
        printf("0x%02x, ", *byte);
        byte++;
        if (0 == i % 20)
            printf("\n");
    }

    close(ofd);
    return 0;
}
And here is the input binary file it reads. (The sample bin file is 0x01 twenty times, 0x02 twenty times, ... 0xFF twenty times, so 255 x 20 = 5100 bytes.)
The same code is compiled and run with Windows MinGW gcc and with Linux gcc.
The following strange observations are seen in the Windows run:
1. The blk_hdr_t struct, although 4-byte aligned and 16 bytes in total, is reported as 22 bytes.
(Update: I found that this was solved by the -mno-ms-bitfields option.)
2. Although 5100 bytes are available, read() could only read 1000 bytes. What stops it from reading further?
3. read() also leaves "holes" filled with 0x00 bytes. I don't understand this strange behaviour.
4. Enabling the lseek() to skip the first 1024 bytes ends up reaching the end of the file.
Everything looks fine in the Linux result (although point 3 exists on the Linux side too; that one is trivial for me, but I'm curious about the behaviour).
So finally, how can I make this code produce the exact same result with Windows gcc as with Linux gcc?
Could anyone please enlighten me?
(The fields in the structures cannot be reordered.)
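One Windows-specific detail worth checking (an assumption on my part, not a confirmed cause of the short read): MinGW's open() defaults to text mode, in which read() translates CR/LF pairs and treats byte 0x1A as end-of-file, so the data in the buffer can be shorter than and different from the file contents. A minimal sketch of forcing binary mode on both platforms:

#include <fcntl.h>

#ifndef O_BINARY
#define O_BINARY 0   /* not defined on POSIX systems, where binary mode is the default */
#endif

/* Open the file in binary mode so the Windows CRT does no CR/LF or
 * Ctrl-Z (0x1A) processing on the data. */
static int open_binary(const char *filename)
{
    return open(filename, O_RDONLY | O_BINARY);
}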

struct one byte larger than expected

I have the following union:
typedef union {
    uint8_t bValue;
    uint16_t uiValue;
    int16_t iValue;
    uint32_t ulValue;
    int32_t lValue;
    struct {                  // Float messages
        float fValue;
        uint8_t fPrecision;   // Number of decimals when serializing
    };
    struct {                  // Presentation messages
        uint8_t version;      // Library version
        uint8_t sensorType;   // Sensor type hint for controller, see table above
    };
    char data[MAX_PAYLOAD + 1];   // MAX_PAYLOAD is defined elsewhere
} mysensor_payload_data;
and I want to use this union in the following typedef:
typedef union {
    struct {
        uint8_t last;                 // 8 bit - Id of last node this message passed
        uint8_t sender;               // 8 bit - Id of sender node (origin)
        uint8_t destination;          // 8 bit - Id of destination node
        uint8_t version_length;       // 2 bit - Protocol version
                                      // 1 bit - Signed flag
                                      // 5 bit - Length of payload
        uint8_t command_ack_payload;  // 3 bit - Command type
                                      // 1 bit - Request an ack - Indicator that receiver should send an ack back.
                                      // 1 bit - Is ack message - Indicator that this is the actual ack message.
                                      // 3 bit - Payload data type
        uint8_t type;                 // 8 bit - Type varies depending on command
        uint8_t sensor;               // 8 bit - Id of sensor that this message concerns.
        // Each message can transfer a payload. We add one extra byte for string
        // terminator \0 to be "printable"; this is not transferred OTA.
        // This union is used to simplify the construction of the binary data types transferred.
        mysensor_payload_data payload_data;
    };
    uint8_t array[HEADER_SIZE + MAX_PAYLOAD + 1];   // HEADER_SIZE is defined elsewhere
} /*__attribute__((packed))*/ MyMessage;
Now when I run the following line of code:
strncpy(message->payload_data.data, value, length);
the bytes get written at location 0x20000498 when "message" is at 0x20000490. I would expect the bytes to be at 0x20000497 if I count correctly.
Why does it write at that location?
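The offset can be checked with offsetof. A reduced sketch (the MAX_PAYLOAD and HEADER_SIZE values are assumed here for illustration only) shows that, because the payload union contains 4-byte members, it is aligned to 4, which places it at offset 8 rather than 7:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define HEADER_SIZE 7    /* assumed value, for illustration only */
#define MAX_PAYLOAD 25   /* assumed value, for illustration only */

typedef union {
    uint8_t bValue;
    uint32_t ulValue;              /* 4-byte member -> alignment requirement of 4 */
    float fValue;
    char data[MAX_PAYLOAD + 1];
} payload_t;

typedef union {
    struct {
        uint8_t last, sender, destination;
        uint8_t version_length, command_ack_payload;
        uint8_t type, sensor;      /* seven 1-byte fields: offsets 0..6 */
        payload_t payload_data;    /* aligned to 4 -> offset 8, not 7 */
    };
    uint8_t array[HEADER_SIZE + MAX_PAYLOAD + 1];
} msg_t;

int main(void)
{
    printf("payload_data is at offset %zu\n", offsetof(msg_t, payload_data));  /* prints 8 */
    printf("sizeof(msg_t) is %zu\n", sizeof(msg_t));
    return 0;
}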

BitTorrent test case failing with libsodium

I'm trying to run the test vector as described in BitTorrent BEP 44 test #1, but I'm not creating the same signature as they do:
305ac8aeb6c9c151fa120f120ea2cfb923564e11552d06a5d856091e5e853cff
1260d3f39e4999684aa92eb73ffd136e6f4f3ecbfda0ce53a1608ecd7ae21f01
Instead, the signature I create using libsodium is:
c44ad65291c2b1087218db8a43e3fa7b73cfa01b585b0ff9e6b962ed50e701a1
6065277417ff5bbae43d9b76e52129d27bf2e33e8b043ea67ace7ff91dae4d02
Using this code:
#include <string.h>
#include <stdio.h>
#include <sodium/crypto_sign.h>

// Test vector #1 from http://bittorrent.org/beps/bep_0044.html
// Using libsodium.
int main(int argc, char *argv[])
{
    const char* buf = "3:seqi1e1:v12:Hello World!";
    const char* sk =
        "\xe0\x6d\x31\x83\xd1\x41\x59\x22\x84\x33\xed\x59\x92\x21\xb8\x0b"
        "\xd0\xa5\xce\x83\x52\xe4\xbd\xf0\x26\x2f\x76\x78\x6e\xf1\xc7\x4d"
        "\xb7\xe7\xa9\xfe\xa2\xc0\xeb\x26\x9d\x61\xe3\xb3\x8e\x45\x0a\x22"
        "\xe7\x54\x94\x1a\xc7\x84\x79\xd6\xc5\x4e\x1f\xaf\x60\x37\x88\x1d";

    unsigned char signature[crypto_sign_BYTES];
    crypto_sign_detached(signature,
                         NULL,
                         (const unsigned char*) buf,
                         strlen(buf),
                         (const unsigned char*) sk);

    char signed_buf[crypto_sign_BYTES * 2 + 1];   // +1 for the terminating '\0'
    for (size_t i = 0; i < sizeof(signature); ++i) {
        sprintf(signed_buf + i*2, "%.2x", signature[i]);
    }
    printf("%s\n", signed_buf);
}
Seems to be something silly I'm missing, but I just can't see it.
As explained here, there appear to be (at least) two different formats for private keys. One of them is called ref10, and it is the one used by libsodium. It is composed of 32 bytes of seed concatenated with another 32 bytes of public key.
I couldn't find the name of the other format, but - as also explained in the above link - it is basically the seed hashed with SHA-512. More precisely:
void ref10_to_lib(
    unsigned char *private_key,
    const unsigned char *ref10_private_key)
{
    // sha512() stands for a SHA-512 of the 32-byte seed into the 64-byte
    // private_key (with libsodium that would be
    // crypto_hash_sha512(private_key, ref10_private_key, 32)).
    sha512(ref10_private_key, 32, private_key);
    private_key[0] &= 248;
    private_key[31] &= 63;
    private_key[31] |= 64;
}
The BitTorrent specification uses the second format, and to be able to use it one must call the deprecated crypto_sign_edwards25519sha512batch function instead of crypto_sign_detached, like so:
#include <string.h>
#include <stdio.h>
#include <sodium/crypto_sign.h>
#include <sodium/crypto_sign_edwards25519sha512batch.h>

// Test vector #1 from http://bittorrent.org/beps/bep_0044.html
// Using libsodium.
int main(int argc, char *argv[])
{
    const char* buf = "3:seqi1e1:v12:Hello World!";
    const char* sk =
        "\xe0\x6d\x31\x83\xd1\x41\x59\x22\x84\x33\xed\x59\x92\x21\xb8\x0b"
        "\xd0\xa5\xce\x83\x52\xe4\xbd\xf0\x26\x2f\x76\x78\x6e\xf1\xc7\x4d"
        "\xb7\xe7\xa9\xfe\xa2\xc0\xeb\x26\x9d\x61\xe3\xb3\x8e\x45\x0a\x22"
        "\xe7\x54\x94\x1a\xc7\x84\x79\xd6\xc5\x4e\x1f\xaf\x60\x37\x88\x1d";

    unsigned char signature[crypto_sign_BYTES];
    crypto_sign_edwards25519sha512batch(
        signature,
        NULL,
        (const unsigned char*) buf,
        strlen(buf),
        (const unsigned char*) sk);

    char signed_buf[crypto_sign_BYTES * 2 + 1];   // +1 for the terminating '\0'
    for (size_t i = 0; i < sizeof(signature); ++i) {
        sprintf(signed_buf + i*2, "%.2x", signature[i]);
    }
    printf("%s\n", signed_buf);
}

I have an RGBA image file which I want to convert to BMP in C++

I am new to interpreting image formats.
I have read about ImageMagick, but are there any other libraries available which I can use?
Or is it just a matter of reading the RGBA file and rearranging the data? Any code snippet?
Any help here would be greatly appreciated.
EDIT: I have written the code below to convert an RGBA image to BMP, but the output image is invalid when viewed. Can I get help here?
I referred to the post by bRad Gibson, IntegerfileToBMP.
#include <iostream>
#include <fstream>
#include <cstdint>
#include <cmath>
#include <iterator>
#include <vector>
using namespace std;

typedef unsigned char BYTE;      // 1 byte
typedef unsigned short WORD;     // 2 bytes, uint16_t
typedef unsigned long DWORD;     // 4 bytes, uint32_t
typedef unsigned long int LONG;

struct BMP
{
    BMP();
    struct
    {
        WORD ID;
        DWORD fileSizeInBytes;
        WORD reserved1;
        WORD reserved2;
        DWORD pixelArrayOffsetInBytes;
    } FileHeader;

    enum class CompressionMethod : DWORD {
        BI_RGB = 0x00,
        BI_RLE8 = 0x01,
        BI_RLE4 = 0x02,
        BI_BITFIELDS = 0x03,
        BI_JPEG = 0x04,
        BI_PNG = 0x05,
        BI_ALPHABITFIELDS = 0x06
    };

    struct
    {
        DWORD headerSizeInBytes;
        DWORD bitmapWidthInPixels;
        DWORD bitmapHeightInPixels;
        WORD colorPlaneCount;
        WORD bitsPerPixel;
        CompressionMethod compressionMethod;
        DWORD bitmapSizeInBytes;
        int32_t horizontalResolutionInPixelsPerMeter;
        int32_t verticalResolutionInPixelsPerMeter;
        DWORD paletteColorCount;
        DWORD importantColorCount;
    } DIBHeader;
};

BMP::BMP()
{
    // Initialized fields
    FileHeader.ID = 0x4d42;   // == 'BM' (little-endian)
    FileHeader.reserved1 = 0;
    FileHeader.reserved2 = 0;
    FileHeader.pixelArrayOffsetInBytes = sizeof( FileHeader ) + sizeof( DIBHeader );
    DIBHeader.headerSizeInBytes = 40;
    DIBHeader.colorPlaneCount = 1;
    DIBHeader.bitsPerPixel = 32;
    DIBHeader.compressionMethod = CompressionMethod::BI_RGB;
    DIBHeader.horizontalResolutionInPixelsPerMeter = 2835;   // == 72 ppi
    DIBHeader.verticalResolutionInPixelsPerMeter = 2835;     // == 72 ppi
    DIBHeader.paletteColorCount = 0;
    DIBHeader.importantColorCount = 0;
}

int main()
{
    const std::string rgbaFilename = "rgbaFile.rgba";
    const std::string bitmapFilename = "bitmapFile.bmp";

    // Read RGBA file
    ifstream readRGBA(rgbaFilename, ios::binary);
    if( !readRGBA )
        return 0;
    std::vector<char> buffer((
        std::istreambuf_iterator<char>(readRGBA)),
        (std::istreambuf_iterator<char>()));

    // Construct .bmp
    BMP bmp;
    size_t charCount = buffer.size();
    bmp.DIBHeader.bitmapSizeInBytes = charCount * sizeof( buffer[ 0 ] );
    bmp.FileHeader.fileSizeInBytes = bmp.FileHeader.pixelArrayOffsetInBytes + bmp.DIBHeader.bitmapSizeInBytes;
    bmp.DIBHeader.bitmapWidthInPixels = static_cast< uint32_t >( ceil( sqrt( (float)charCount ) ) );
    bmp.DIBHeader.bitmapHeightInPixels = static_cast< uint32_t >( ceil( charCount / static_cast< float >( bmp.DIBHeader.bitmapWidthInPixels ) ) );

    std::ofstream writeBMP( bitmapFilename, std::ofstream::binary );
    if( !writeBMP )
        return 0;
    writeBMP.write( reinterpret_cast< char * >( &bmp ), sizeof( bmp ) );
    writeBMP.write( reinterpret_cast< char * >( &buffer[ 0 ] ), bmp.DIBHeader.bitmapSizeInBytes );
    return 0;
}
There are many image libraries that can convert image data. As you noted, ImageMagick is one such library, capable of converting RAW (RGB[A]) data into a BMP.
However, the BMP format is relatively simple compared to many other image formats. RAW data can be converted into a BMP with a small amount of code, so if you would like to reduce your external dependencies (e.g. not use external code), writing your own BMP serialization code does not necessarily warrant integrating an external library.
See here for a simple example that reads and writes the format to/from RAW data.
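To illustrate the point, a minimal sketch (plain C, also valid as C++) of writing 32-bpp RGBA pixels as a BMP. It assumes the caller already knows the width and height, since a raw .rgba file carries no dimensions, and it converts each pixel to the B,G,R,A byte order and bottom-up row order that BMP expects; fixed-width types and packed headers keep the on-disk layout exact.

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)              /* no padding inside the on-disk headers */
typedef struct {
    uint16_t id;                   /* 'BM' */
    uint32_t fileSize;
    uint16_t reserved1, reserved2;
    uint32_t pixelArrayOffset;
} BmpFileHeader;                   /* 14 bytes */

typedef struct {
    uint32_t headerSize;           /* 40 = BITMAPINFOHEADER */
    int32_t  width;
    int32_t  height;               /* positive height = bottom-up rows */
    uint16_t planes;               /* must be 1 */
    uint16_t bitsPerPixel;         /* 32 */
    uint32_t compression;          /* 0 = BI_RGB */
    uint32_t imageSize;
    int32_t  xPpm, yPpm;
    uint32_t paletteColors, importantColors;
} BmpInfoHeader;                   /* 40 bytes */
#pragma pack(pop)

/* rgba: width*height*4 bytes, rows top-to-bottom, bytes in R,G,B,A order.
 * Assumes a little-endian host, since BMP header fields are little-endian. */
int write_bmp32(const char *path, const uint8_t *rgba, int32_t width, int32_t height)
{
    const uint32_t imageSize = (uint32_t)width * (uint32_t)height * 4u;
    const uint32_t pixelOffset = (uint32_t)(sizeof(BmpFileHeader) + sizeof(BmpInfoHeader));
    BmpFileHeader fh = { 0x4D42, pixelOffset + imageSize, 0, 0, pixelOffset };
    BmpInfoHeader ih = { 40, width, height, 1, 32, 0, imageSize, 2835, 2835, 0, 0 };

    FILE *f = fopen(path, "wb");
    if (!f)
        return -1;
    fwrite(&fh, sizeof fh, 1, f);
    fwrite(&ih, sizeof ih, 1, f);

    /* BMP stores rows bottom-up and pixels as B,G,R,A; 32-bpp rows need no padding. */
    for (int32_t y = height - 1; y >= 0; --y) {
        for (int32_t x = 0; x < width; ++x) {
            const uint8_t *p = rgba + ((size_t)y * (size_t)width + (size_t)x) * 4;
            uint8_t bgra[4] = { p[2], p[1], p[0], p[3] };
            fwrite(bgra, 1, 4, f);
        }
    }
    return fclose(f) == 0 ? 0 : -1;
}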

how to get the version of jemalloc

I use the following code, but it does not get the right version string of jemalloc.
size_t size = 1000;
char *ptr = (char *) malloc(size);
mallctl("version", ptr, &size, NULL, 0);
I just got a 4-byte result, and when I printed it out it was not the version string.
I think the problem is that the version string is a const char *. But if I call it with a const char *, what size should I fill in?
You should fill in the size of a const char *, of course.
The "version" value is a const char *, which is four bytes on your platform. The call doesn't return the version string itself but a pointer to the version string, so you don't need to allocate space for the version, just for a pointer. Here's working example code:
#include "stdio.h"
#include "jemalloc/jemalloc.h"
int main(void)
{
const char *j;
size_t s = sizeof(j);
mallctl("version", &j, &s, NULL, 0);
printf("%s\n", j);
}
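For reference, the same lookup with basic error handling, as a sketch: mallctl() returns 0 on success or an errno-style error code on failure.

#include <stdio.h>
#include <string.h>
#include "jemalloc/jemalloc.h"

int main(void)
{
    const char *version = NULL;
    size_t sz = sizeof(version);

    /* "version" yields a pointer to jemalloc's static version string. */
    int err = mallctl("version", &version, &sz, NULL, 0);
    if (err != 0) {
        fprintf(stderr, "mallctl(\"version\") failed: %s\n", strerror(err));
        return 1;
    }
    printf("jemalloc version: %s\n", version);
    return 0;
}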
