I use the following code, but it does not get the right version string of jemalloc:
size_t size = 1000;
char *ptr = (char *) malloc(size);
mallctl("version", ptr, &size, NULL, 0);
I just got back a string whose reported size was 4 bytes, and when I printed it out it was not the version string.
I think the problem is that the version string is a const char *. But if I call mallctl with a const char *, what size should I fill in?
You should fill in the size of a const char *, of course.
The "version" parameter is a const char *, which is four bytes on your platform. This function doesn't get the version string but actually gets a pointer to the version string. You don't need to allocate space for the version, just a pointer. Here's working example code:
#include "stdio.h"
#include "jemalloc/jemalloc.h"
int main(void)
{
const char *j;
size_t s = sizeof(j);
mallctl("version", &j, &s, NULL, 0);
printf("%s\n", j);
}
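The same pointer-size pattern works for other mallctl names too. For example (a sketch, assuming a jemalloc build with statistics enabled), the numeric "stats.allocated" value is read into a size_t the same way:

#include <stdio.h>
#include "jemalloc/jemalloc.h"

int main(void)
{
    size_t allocated;                 /* "stats.allocated" is a size_t value */
    size_t sz = sizeof(allocated);

    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
        printf("allocated: %zu bytes\n", allocated);
    return 0;
}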
I am learning ioctl functionality in device drivers. There is a function pointer .compat_ioctl in file_operations which allows 32-bit processes to use ioctls on 64-bit machines.
Following is my driver code:
#ifndef __IOCTL_CMD_H
#define __IOCTL_CMD_H
#define MSG_MAGIC_NUMBER 0x21
#define MSG_IOCTL_GET_LENGTH _IOR(MSG_MAGIC_NUMBER, 1, unsigned int)
#define MSG_IOCTL_CLEAR_BUFFER _IO(MSG_MAGIC_NUMBER, 2)
#define MSG_IOCTL_FILL_BUFFER _IOW(MSG_MAGIC_NUMBER, 3, unsigned char)
#define MSG_GET_ADDRESS _IOR(MSG_MAGIC_NUMBER, 4, unsigned long)
#define MSG_IOCTL_MAX_CMDS 4
#endif
long device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    unsigned char ch;
    int retval = 0;
    long size = _IOC_SIZE(cmd);

    pr_info("%s: Cmd:%u\t Arg:%lu Size:%lu add:%p\n", __func__, cmd, arg, size, &ch);
    if (_IOC_TYPE(cmd) != MSG_MAGIC_NUMBER) return -ENOTTY;
    if (_IOC_NR(cmd) > MSG_IOCTL_MAX_CMDS) return -ENOTTY;

    //access_ok is kernel-oriented, so the concept of read and write is reversed
    retval = access_ok((void __user *)arg, size);
    pr_info("access_ok returned:%d\n", retval);
    if (!retval)
        return -EFAULT;

    switch (cmd)
    {
    //Get Length of buffer
    case MSG_IOCTL_GET_LENGTH:
        pr_info("Get Buffer Length\n");
        put_user(MAX_SIZE, (unsigned int *)arg);
        break;
    //clear buffer
    case MSG_IOCTL_CLEAR_BUFFER:
        pr_info("Clear buffer\n");
        memset(kernel_buffer, 0, sizeof(kernel_buffer));
        break;
    //fill character
    case MSG_IOCTL_FILL_BUFFER:
        get_user(ch, (unsigned char *)arg);
        pr_info("Fill Character:%c\n", ch);
        memset(kernel_buffer, ch, sizeof(kernel_buffer));
        buffer_index = sizeof(kernel_buffer);
        break;
    //address of kernel buffer
    case MSG_GET_ADDRESS:
        put_user(0x12345678, (unsigned long *)arg);
        pr_info("MSG_GET_ADDRESS\n");
        break;
    default:
        pr_info("Unknown Command:%u\n", cmd);
        return -ENOTTY;
    }
    return 0;
}
long device_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    unsigned char ch;
    int retval = 0;
    long size = _IOC_SIZE(cmd);

    pr_info("%s: Cmd:%u\t Arg:%lu Size:%lu add:%p\n", __func__, cmd, arg, size, &ch);
    if (_IOC_TYPE(cmd) != MSG_MAGIC_NUMBER) return -ENOTTY;
    if (_IOC_NR(cmd) > MSG_IOCTL_MAX_CMDS) return -ENOTTY;

    //access_ok is kernel-oriented, so the concept of read and write is reversed
    retval = access_ok((void __user *)arg, size);
    pr_info("access_ok returned:%d\n", retval);
    if (!retval)
        return -EFAULT;

    switch (cmd)
    {
    //Get Length of buffer
    case MSG_IOCTL_GET_LENGTH:
        pr_info("Get Buffer Length\n");
        put_user(MAX_SIZE, (unsigned int *)arg);
        break;
    //clear buffer
    case MSG_IOCTL_CLEAR_BUFFER:
        pr_info("Clear buffer\n");
        memset(kernel_buffer, 0, sizeof(kernel_buffer));
        break;
    //fill character
    case MSG_IOCTL_FILL_BUFFER:
        get_user(ch, (unsigned char *)arg);
        pr_info("Fill Character:%c\n", ch);
        memset(kernel_buffer, ch, sizeof(kernel_buffer));
        buffer_index = sizeof(kernel_buffer);
        break;
    //address of kernel buffer
    case MSG_GET_ADDRESS:
        put_user(0x12345678, (unsigned long *)arg);
        pr_info("MSG_GET_ADDRESS\n");
        break;
    default:
        pr_info("Unknown Command:%u\n", cmd);
        return -ENOTTY;
    }
    return 0;
}
struct file_operations device_fops = {
    .read = device_read,
    .write = device_write,
    .open = device_open,
    .release = device_release,
    .llseek = device_lseek,
    .unlocked_ioctl = device_ioctl,
    .compat_ioctl = device_compat_ioctl
};
The MSG_GET_ADDRESS ioctl takes an unsigned long, which is 4 bytes in a 32-bit process and 8 bytes in a 64-bit process. That is the reason I wrote compat_ioctl.
When I call the code below from user space (a 32-bit process), it fails with an unknown ioctl in the compat_ioctl definition.
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
/* plus the command header above that defines MSG_GET_ADDRESS */

int main(int argc, char *argv[])
{
    char buffer[1024];
    int fd;
    unsigned int length;
    int i = 0;
    unsigned long addr;

    fd = open("/dev/msg", O_RDWR);
    if (fd < 0) {
        perror("fd failed");
        exit(2);
    }
    printf("Size:%d\n", _IOC_SIZE(MSG_GET_ADDRESS));
    printf("cmd:%u\n", MSG_GET_ADDRESS);
    ioctl(fd, MSG_GET_ADDRESS, &addr);
    perror("ioctl");
    getchar();
    printf("address:%lx\n", addr);
    close(fd);
}
What is the mistake I am making here?
Your MSG_GET_ADDRESS ioctl request code is defined as:
#define MSG_GET_ADDRESS _IOR(MSG_MAGIC_NUMBER, 4, unsigned long)
The size of the third parameter is encoded into the ioctl request code. The size can be extracted from the request code using the _IOC_SIZE(req) macro.
The numeric value of MSG_GET_ADDRESS will be different in a 32-bit process/kernel compared to a 64-bit process/kernel. In particular, the encoded size will be different.
On a 32-bit process/kernel, _IOC_SIZE(MSG_GET_ADDRESS) will be 4. On a 64-bit process/kernel, _IOC_SIZE(MSG_GET_ADDRESS) will be 8. This is due to the different sizeof(unsigned long) values on 32-bit and 64-bit systems.
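You can see the difference with a small user-space sketch (my illustration, not from the driver), which builds the same request with explicitly 4-byte and 8-byte payload types standing in for the two sizes of unsigned long:

#include <stdio.h>
#include <sys/ioctl.h>   /* brings in _IOR and _IOC_SIZE on Linux/glibc */

int main(void)
{
    /* Hypothetical stand-ins: a 4-byte and an 8-byte payload type model
       what 'unsigned long' becomes in 32-bit and 64-bit builds. */
    unsigned int cmd32 = _IOR(0x21, 4, unsigned int);
    unsigned int cmd64 = _IOR(0x21, 4, unsigned long long);

    printf("32-bit-sized cmd: %#x, _IOC_SIZE = %u\n", cmd32, (unsigned)_IOC_SIZE(cmd32));
    printf("64-bit-sized cmd: %#x, _IOC_SIZE = %u\n", cmd64, (unsigned)_IOC_SIZE(cmd64));
    return 0;
}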
When running a 32-bit process on a 64-bit kernel with 32-bit compatibility support, the 32-bit process will be calling ioctl() with the 32-bit version of the MSG_GET_ADDRESS request code. However, your driver's device_compat_ioctl() is looking for the 64-bit version of the MSG_GET_ADDRESS request code.
A solution is to define a 32-bit version of the ioctl request code in the driver to mirror the "official" MSG_GET_ADDRESS request code:
#define MSG32_GET_ADDRESS _IOR(MSG_MAGIC_NUMBER, 4, compat_ulong_t)
Note that this request code does not need to be in the user-mode headers as it is only for use in kernel mode. However, if it is more convenient, you could include it in the user-mode headers but wrapped in a #ifdef __KERNEL__ / #endif pair:
#ifdef __KERNEL__
#define MSG32_GET_ADDRESS _IOR(MSG_MAGIC_NUMBER, 4, compat_ulong_t)
#endif
Now, your device_compat_ioctl function should be changed to handle a MSG32_GET_ADDRESS request code instead of a MSG_GET_ADDRESS request code:
//address of kernel buffer
case MSG32_GET_ADDRESS:
    put_user(0x12345678, (compat_ulong_t *)arg);
    pr_info("MSG_GET_ADDRESS\n");
    break;
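As an aside, a sketch (assuming <linux/compat.h> is included, which provides compat_ulong_t and compat_ptr()): in a compat handler, the 32-bit user-space pointer carried in arg is conventionally mapped with compat_ptr() before use:

case MSG32_GET_ADDRESS: {
    compat_ulong_t __user *p = compat_ptr(arg);  /* map the 32-bit user pointer */

    put_user((compat_ulong_t)0x12345678, p);
    pr_info("MSG_GET_ADDRESS\n");
    break;
}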
Note: According to the comments in your code, the MSG_GET_ADDRESS is actually supposed to get the address of a kernel buffer. I don't know what your user-space code intends to do with it, but be aware that a 64-bit kernel address will not fit in a 32-bit unsigned long (or the 32-bit compat_ulong_t type).
I'm trying to run the test vector as described in BitTorrent BEP 44 test #1, but I'm not creating the same signature as they do:
305ac8aeb6c9c151fa120f120ea2cfb923564e11552d06a5d856091e5e853cff
1260d3f39e4999684aa92eb73ffd136e6f4f3ecbfda0ce53a1608ecd7ae21f01
Instead, the signature I create using libsodium is:
c44ad65291c2b1087218db8a43e3fa7b73cfa01b585b0ff9e6b962ed50e701a1
6065277417ff5bbae43d9b76e52129d27bf2e33e8b043ea67ace7ff91dae4d02
Using this code:
#include <string.h>
#include <stdio.h>
#include <sodium/crypto_sign.h>

// Test vector #1 from http://bittorrent.org/beps/bep_0044.html
// Using libsodium.
int main(int argc, char *argv[])
{
    const char* buf = "3:seqi1e1:v12:Hello World!";
    const char* sk =
        "\xe0\x6d\x31\x83\xd1\x41\x59\x22\x84\x33\xed\x59\x92\x21\xb8\x0b"
        "\xd0\xa5\xce\x83\x52\xe4\xbd\xf0\x26\x2f\x76\x78\x6e\xf1\xc7\x4d"
        "\xb7\xe7\xa9\xfe\xa2\xc0\xeb\x26\x9d\x61\xe3\xb3\x8e\x45\x0a\x22"
        "\xe7\x54\x94\x1a\xc7\x84\x79\xd6\xc5\x4e\x1f\xaf\x60\x37\x88\x1d";

    unsigned char signature[crypto_sign_BYTES];

    crypto_sign_detached(signature,
                         NULL,
                         (const unsigned char*) buf,
                         strlen(buf),
                         (const unsigned char*) sk);

    char signed_buf[crypto_sign_BYTES * 2 + 1];  // +1 for the terminating NUL

    for (int i = 0; i < sizeof(signature); ++i) {
        sprintf(signed_buf + i*2, "%.2x", signature[i]);
    }
    printf("%s\n", signed_buf);
}
Seems to be something silly I'm missing, but I just can't see it.
As explained here, there appear to be (at least) two different formats for private keys. One of them is called ref10, and it is the one used by libsodium. It is composed of a 32-byte seed concatenated with the 32-byte public key.
I couldn't find the name of the other format, but, as also explained in the link above, it's basically the seed hashed with SHA-512 and then clamped. More precisely:
#include <sodium/crypto_hash_sha512.h>

// Expands a 32-byte ref10 seed into the 64-byte hashed-and-clamped form.
void ref10_to_lib(
        unsigned char *private_key,              // out: 64 bytes
        const unsigned char *ref10_private_key)  // in: the 32-byte seed
{
    crypto_hash_sha512(private_key, ref10_private_key, 32);
    private_key[0] &= 248;
    private_key[31] &= 63;
    private_key[31] |= 64;
}
The BitTorrent specification uses the second format, and to be able to use it, one must call the deprecated crypto_sign_edwards25519sha512batch function instead of crypto_sign_detached, like this:
#include <string.h>
#include <stdio.h>
#include <sodium/crypto_sign.h>
#include <sodium/crypto_sign_edwards25519sha512batch.h>

// Test vector #1 from http://bittorrent.org/beps/bep_0044.html
// Using libsodium.
int main(int argc, char *argv[])
{
    const char* buf = "3:seqi1e1:v12:Hello World!";
    const char* sk =
        "\xe0\x6d\x31\x83\xd1\x41\x59\x22\x84\x33\xed\x59\x92\x21\xb8\x0b"
        "\xd0\xa5\xce\x83\x52\xe4\xbd\xf0\x26\x2f\x76\x78\x6e\xf1\xc7\x4d"
        "\xb7\xe7\xa9\xfe\xa2\xc0\xeb\x26\x9d\x61\xe3\xb3\x8e\x45\x0a\x22"
        "\xe7\x54\x94\x1a\xc7\x84\x79\xd6\xc5\x4e\x1f\xaf\x60\x37\x88\x1d";

    // The old API writes a signed message (the signature followed by a
    // copy of the message), so the buffer must hold both.
    unsigned char signed_message[crypto_sign_BYTES + 26];  /* 26 == strlen(buf) */
    unsigned long long signed_message_len;

    crypto_sign_edwards25519sha512batch(
        signed_message,
        &signed_message_len,
        (const unsigned char*) buf,
        strlen(buf),
        (const unsigned char*) sk);

    // The signature is the first crypto_sign_BYTES bytes.
    char signed_buf[crypto_sign_BYTES * 2 + 1];  // +1 for the terminating NUL
    for (int i = 0; i < crypto_sign_BYTES; ++i) {
        sprintf(signed_buf + i*2, "%.2x", signed_message[i]);
    }
    printf("%s\n", signed_buf);
}
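This should print the signature expected by the test vector, i.e. the 305ac8… value quoted in the question.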
I want to set a jprobe hook on do_execve to catch every executed program.
My code works on Linux kernels <= 3.2 (Debian). This is my output on kernel 3.2:
[ 628.534037] registered: do_execve, ret: 0
[ 723.995797] execve: /usr/bin/vi
[ 726.807025] execve: /bin/dmesg
On a 4.1 kernel I get the same result (everything registers), but there is no "execve" output:
[ 8621.430568] registered: do_execve, ret: 0
And this is my code:
static struct jprobe jprobe_hooks[] = {
    {
        .entry = jdo_execve,
        .kp = { .symbol_name = "do_execve" }
    }
};

static long jdo_execve(const char *filename, const char __user *const __user *argv, const char __user *const __user *envp, struct pt_regs *regs)
{
    printk(KERN_INFO "execve: %s", filename);
}
//
// registration
//
int ret, x, reg_error;

reg_error = 0;
for (x = 0; x < sizeof(jprobe_hooks) / sizeof(jprobe_hooks[0]); x++)
{
    ret = register_jprobe(&jprobe_hooks[x]);
    if (ret < 0)
    {
        printk(KERN_INFO "register_jprobe failed, returned %d, item: %s\n", ret, jprobe_hooks[x].kp.symbol_name);
        reg_error++;
    }
    else
    {
        printk(KERN_INFO "registered: %s, ret: %u\n", jprobe_hooks[x].kp.symbol_name, ret);
    }
}
When I grep /proc/kallsyms, I get this on 3.2:
grep do_execv /proc/kallsyms
ffffffff81100650 T do_execve
and this on 4.2:
grep do_execv /proc/kallsyms
ffffffff811d2950 T do_execve
ffffffff811d2980 T do_execveat
I even tried to change the function (because do_execve prototype has changed) to this:
static int jdo_execve(struct filename *fname, const char __user *const __user *__argv, const char __user *const __user *__envp)
{
    int i = 0;
    printk(KERN_INFO "execve: %s ", fname->name);
}
and even that didn't help.
I can set hooks on other functions like do_fork or sys_open, but not on do_execve. Why? Does anybody have ideas? Why does it not work anymore?
Edit:
I'm also hooking do_execveat:
static int jdo_execveat(int fd, struct filename *fname, const char __user *const __user *__argv, const char __user *const __user *__envp, int flags)
There are several problems that may be preventing your jprobe messages:
You don't end your print message printk(KERN_INFO "execve: %s", filename); with a newline, so the log buffer is not flushed.
The API has changed: do_execve now takes its filename parameter as a struct filename *.
Your jprobe code is incomplete: there is no module entry, the jprobe handler must end with a jprobe_return() call, and so on. Look at the samples in the kernel source tree under samples/kprobes.
Try to fix these - maybe it will help. (A sketch of a corrected handler follows.)
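For reference, a minimal corrected handler for the 4.x prototype might look like this (my illustration; it assumes do_execve's struct filename * parameter, whose name field is declared in include/linux/fs.h):

static int jdo_execve(struct filename *filename,
                      const char __user *const __user *argv,
                      const char __user *const __user *envp)
{
    pr_info("execve: %s\n", filename->name);  /* newline so the line is flushed */
    jprobe_return();  /* a jprobe handler must never return normally */
    return 0;         /* never reached */
}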
Anyway, I tried it myself - here is the code - and things do indeed look strange. When I load the module, it registers 2 jprobes - one for do_execve, another for do_execveat. But I don't see any messages when I execute programs. BUT what I do see are periodic messages like this:
jprobe: execve: /usr/lib/systemd/systemd-cgroups-agent
It means that the jprobe itself works, just not for every execve call.
So I wrote a simple C program that calls execve, just to be sure it's really called, and still nothing happens except for systemd-cgroups-agent.
The variable filepath, which is a string, contains the value Música. I have the following code:
wstring fp(filepath.length(), L' ');
copy(filepath.begin(), filepath.end(), fp.begin());
fp then contains the value M?sica. How do I convert filepath to fp without losing the encoding for the ú character?
Use the function MultiByteToWideChar.
Sample code:
#include <windows.h>
#include <string>

std::string toStdString(const std::wstring& s, UINT32 codePage)
{
    // Query the required buffer size first: the multibyte form can be
    // longer than the number of wide characters (e.g. with UTF-8).
    int bufferSize = WideCharToMultiByte(codePage, 0, s.c_str(), (int)s.length(),
                                         NULL, 0, NULL, NULL);
    std::string retVal(bufferSize, '\0');
    WideCharToMultiByte(codePage, 0, s.c_str(), (int)s.length(),
                        &retVal[0], bufferSize, NULL, NULL);
    return retVal;
}

std::wstring toStdWString(const std::string& s, UINT32 codePage)
{
    int bufferSize = MultiByteToWideChar(codePage, 0, s.c_str(), (int)s.length(),
                                         NULL, 0);
    std::wstring retVal(bufferSize, L'\0');
    MultiByteToWideChar(codePage, 0, s.c_str(), (int)s.length(),
                        &retVal[0], bufferSize);
    return retVal;
}
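With these helpers, the question's conversion would look like this (a sketch, assuming filepath is UTF-8 encoded - pass CP_ACP instead if it is in the system's default code page):

std::wstring fp = toStdWString(filepath, CP_UTF8);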
Since you are using MFC, you have access to the ATL String Conversion Macros.
This greatly simplifies the conversion vs. using MultiByteToWideChar. Assuming that filepath is encoded in your system's default code page, this should do the trick:
CA2W wideFilepath(filepath.c_str());
wstring fp(static_cast<const wchar_t*>(wideFilepath));
If filepath is not in your system's default code page (let's say it's in UTF-8), then you can specify the encoding to convert from:
CA2W wideFilepath(filepath.c_str(), CP_UTF8);
wstring fp(static_cast<const wchar_t*>(wideFilepath));
To convert the other way, from std::wstring to std::string, you would do this:
// Convert from wide (UTF-16) to UTF-8
CW2A utf8Filepath(fp.c_str(), CP_UTF8);
string utf8Fp(static_cast<const char*>(utf8Filepath));
// Or, convert from wide (UTF-16) to your system's default code page.
CW2A narrowFilepath(fp.c_str());
string narrowFp(static_cast<const char*>(narrowFilepath));
Can someone please post simple code that would convert
System::String^
to
C++ std::string?
I.e., I just want to assign the value of
String^ originalString;
to
std::string newString;
Don't roll your own, use these handy (and extensible) wrappers provided by Microsoft.
For example:
#include <msclr\marshal_cppstd.h>
System::String^ managed = "test";
std::string unmanaged = msclr::interop::marshal_as<std::string>(managed);
You can easily do this as follows:
#include <msclr/marshal_cppstd.h>

System::String^ xyz = "Hi boys";
std::string converted_xyz = msclr::interop::marshal_as<std::string>(xyz);
Check out System::Runtime::InteropServices::Marshal::StringToCoTaskMemUni() and its friends.
Sorry, I can't post code now; I don't have VS on this machine to check that it compiles before posting.
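A sketch of what that might look like (my untested illustration; StringToCoTaskMemUni allocates a native UTF-16 copy that must be freed with Marshal::FreeCoTaskMem):

#include <string>

using namespace System;
using namespace System::Runtime::InteropServices;

std::wstring toStdWString(String^ s)
{
    IntPtr p = Marshal::StringToCoTaskMemUni(s);   // native UTF-16 copy of the managed string
    std::wstring result(static_cast<const wchar_t*>(p.ToPointer()));
    Marshal::FreeCoTaskMem(p);                     // always free the unmanaged copy
    return result;
}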
This worked for me:
#include <stdlib.h>
#include <string.h>
#include <msclr\marshal_cppstd.h>
//..
using namespace msclr::interop;
//..
System::String^ clrString = (TextoDeBoton);
std::string stdString = marshal_as<std::string>(clrString); //String^ to std
//System::String^ myString = marshal_as<System::String^>(MyBasicStirng); //std to String^
prueba.CopyInfo(stdString); //MyMethod
//..
//Where: String^ = TextoDeBoton;
//and stdString is a "normal" string;
Here are some conversion routines I wrote many years ago for a C++/CLI project; they should still work.
void StringToStlWString ( System::String const^ s, std::wstring& os)
{
    String^ string = const_cast<String^>(s);
    const wchar_t* chars = reinterpret_cast<const wchar_t*>((Marshal::StringToHGlobalUni(string)).ToPointer());
    os = chars;
    Marshal::FreeHGlobal(IntPtr((void*)chars));
}

System::String^ StlWStringToString (std::wstring const& os) {
    String^ str = gcnew String(os.c_str());
    //String^ str = gcnew String("");
    return str;
}

System::String^ WPtrToString(wchar_t const* pData, int length) {
    if (length == 0) {
        //use null termination
        length = wcslen(pData);
        if (length == 0) {
            System::String^ ret = "";
            return ret;
        }
    }
    System::IntPtr bfr = System::IntPtr(const_cast<wchar_t*>(pData));
    System::String^ ret = System::Runtime::InteropServices::Marshal::PtrToStringUni(bfr, length);
    return ret;
}

void Utf8ToStlWString(char const* pUtfString, std::wstring& stlString) {
    //wchar_t* pString;
    MAKE_WIDEPTR_FROMUTF8(pString, pUtfString);
    stlString = pString;
}

void Utf8ToStlWStringN(char const* pUtfString, std::wstring& stlString, ULONG length) {
    //wchar_t* pString;
    MAKE_WIDEPTR_FROMUTF8N(pString, pUtfString, length);
    stlString = pString;
}
I found an easy way to get a std::string from a String^ is to use sprintf().
char cStr[50] = { 0 };
String^ clrString = "Hello";

if (clrString->Length < sizeof(cStr))
    sprintf(cStr, "%s", clrString);

std::string stlString(cStr);
No need to call the Marshal functions!
UPDATE: Thanks to Eric, I've modified the sample code to check the size of the input string in order to prevent a buffer overflow.
I spent hours trying to convert a Windows Forms ListBox ToString value to a standard string so that I could use it with fstream to output to a txt file. My Visual Studio didn't come with the marshal header files that several answers here said to use. After much trial and error, I finally found a solution to the problem that just uses System::Runtime::InteropServices:
void MarshalString ( String ^ s, string& os ) {
    using namespace Runtime::InteropServices;
    const char* chars =
        (const char*)(Marshal::StringToHGlobalAnsi(s)).ToPointer();
    os = chars;
    Marshal::FreeHGlobal(IntPtr((void*)chars));
}

//this is the code to use the function:
scheduleBox->SetSelected(0,true);
string a = "test";
String ^ c = gcnew String(scheduleBox->SelectedItem->ToString());
MarshalString(c, a);
filestream << a;
And here is the MSDN page with the example:
http://msdn.microsoft.com/en-us/library/1b4az623(v=vs.80).aspx
I know it's a pretty simple solution but this took me HOURS of troubleshooting and visiting several forums to finally find something that worked.
C# uses the UTF-16 format for its strings.
So, besides just converting the types, you should also be conscious of the string's actual format.
When compiling for Multi-Byte Character Set, Visual Studio and the Win API assume the system's ANSI code page (for example, Windows-1252), not UTF-8.
When compiling for Unicode Character Set, Visual Studio and the Win API assume UTF-16.
So, you must convert the string from UTF-16 to UTF-8, and not just convert it to std::string.
This becomes necessary when working with multi-byte character sets such as some non-Latin languages.
The idea is to decide that std::wstring always represents UTF-16, and std::string always represents UTF-8.
This isn't enforced by the compiler; it's more of a good policy to have.
#include "stdafx.h"
#include <string>
#include <codecvt>
#include <msclr\marshal_cppstd.h>
using namespace System;
int main(array<System::String ^> ^args)
{
System::String^ managedString = "test";
msclr::interop::marshal_context context;
//Actual format is UTF16, so represent as wstring
std::wstring utf16NativeString = context.marshal_as<std::wstring>(managedString);
//C++11 format converter
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> convert;
//convert to UTF8 and std::string
std::string utf8NativeString = convert.to_bytes(utf16NativeString);
return 0;
}
Or have it in a more compact syntax:
int main(array<System::String ^> ^args)
{
    System::String^ managedString = "test";

    msclr::interop::marshal_context context;
    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> convert;
    std::string utf8NativeString = convert.to_bytes(context.marshal_as<std::wstring>(managedString));

    return 0;
}
I like to stay away from the marshaller. Using
CString newString(originalString);
seems much cleaner and faster to me. No need to worry about creating and deleting a context.
// I used VS2012 to write the code below -- convert System::String to std::string
#include "stdafx.h"
#include <iostream>
#include <string>

using namespace System;
using namespace Runtime::InteropServices;

void MarshalString ( String^ s, std::string& outputstring )
{
    const char* kPtoC = (const char*) (Marshal::StringToHGlobalAnsi(s)).ToPointer();
    outputstring = kPtoC;
    Marshal::FreeHGlobal(IntPtr((void*)kPtoC));
}

int _tmain(int argc, _TCHAR* argv[])
{
    std::string strNativeString;
    String ^ strManagedString = "Temp";

    MarshalString(strManagedString, strNativeString);
    std::cout << strNativeString << std::endl;

    return 0;
}
For me, I was getting an error with some of these approaches. I have a std::string. To convert it to String^, I had to do the following:
String^ sysString = gcnew String(stdStr.c_str());
where sysString is a System::String^ and stdStr is a std::string. Hope this helps someone.
You may have to #include <string> for this to work.