CAN / SocketCAN - how reliable is it for file transfer? (Linux)

I want to send files (MP3, about 4 MB each) to devices connected via CAN. I'm testing with a Linux machine and an ESP32. The problem is that not all data arrives at the destination.
I'm sending from the Linux machine using SocketCAN and a UCCB USB-CAN converter
https://www.tindie.com/products/lll7/can-usb-converter-uccb/ to the ESP32, or between two ESP32s.
When sending from the Linux PC to the ESP32, sometimes only a third of the MP3 file is saved. During sending I have a candump slcan0 command running, and it looks like all the data is on the bus, but the ESP32 does not see some frames.
When I send between two ESP32s, for instance 4 MB of data, the receiving ESP32 gets around 3.98 MB; some frames get lost.
I'm using the command "slcand -o -c -f -s8 /dev/ttyACM0 slcan0" to create the slcan0 interface, although changing the speed with the -s switch does not seem to work.
Am I missing something, or is CAN not suitable for such high-speed, high-load operations? I want to have 30 devices on the CAN bus, each receiving around 1 GB of data (MP3 files).
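(Rough numbers for scale, not from the question: a classic CAN frame carries at most 8 data bytes but costs roughly 130 bits on the wire once the 29-bit identifier, CRC, bit stuffing and inter-frame space are counted. Even at 1 Mbit/s that is about 1,000,000 / 130 ≈ 7,700 frames/s, i.e. roughly 60 KB/s of payload, so a 4 MB file takes over a minute and 1 GB takes on the order of 4-5 hours; at 250 kbit/s, four times as long.)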
code below:
CanMgr::CanMgr(QObject *parent, LogModel *logModel) : QObject(parent) {
    this->logModel = logModel;
    // Single-shot timer that kicks off queue processing; restarted on errors.
    send_timer = new QTimer(this);
    send_timer->setSingleShot(true);
    send_timer->setInterval(1);
    connect(send_timer, &QTimer::timeout, this, &CanMgr::processQueue);
}
void CanMgr::connectDevice() {
    QString errorString;
    m_canDevice = QCanBus::instance()->createDevice(m_pluginName, m_socketName, &errorString);
    if (!m_canDevice) {
        qDebug() << QString("Error creating device '%1', reason: '%2'").arg(m_pluginName).arg(errorString);
        return;
    }
    m_numberFramesWritten = 0;
    connect(m_canDevice, &QCanBusDevice::errorOccurred, this, &CanMgr::processErrors);
    connect(m_canDevice, &QCanBusDevice::framesReceived, this, &CanMgr::processReceivedFrames);
    connect(m_canDevice, &QCanBusDevice::framesWritten, this, &CanMgr::processFramesWritten);
    connect(m_canDevice, &QCanBusDevice::stateChanged, this, &CanMgr::stateChanged);
    m_canDevice->setConfigurationParameter(QCanBusDevice::BitRateKey, "250000");
    if (!m_canDevice->connectDevice()) {
        qDebug() << tr("Connection error: %1").arg(m_canDevice->errorString());
        delete m_canDevice;
        m_canDevice = nullptr;
    } else {
        QVariant bitRate = m_canDevice->configurationParameter(QCanBusDevice::BitRateKey);
        if (bitRate.isValid()) {
            qDebug() << tr("Plugin: %1, connected to %2 at %3 kBit/s").arg(m_pluginName).arg(m_socketName).arg(bitRate.toInt() / 1000);
        } else {
            qDebug() << tr("Plugin: %1, connected to %2").arg(m_pluginName).arg(m_socketName);
        }
    }
}
void CanMgr::sendFileContent(QByteArray data) {
    // Frame ID layout: device ID in bits 16+, command ID in the low 16 bits.
    quint32 dev_id = 2;
    quint32 cmd_id = 0;                                 // cmd 0: announce file size
    quint32 frameId = (dev_id << 16) | cmd_id;
    m_canDevice->clear();
    QByteArray size = Helper::byteArrFromInt((quint32)data.size(), 8);
    qDebug() << "CanMgr::sendFileContent file size in bytes:" << Helper::printHex(size);
    QCanBusFrame frame = QCanBusFrame(frameId, size);
    frame.setExtendedFrameFormat(true);
    qDebug() << "frame data:" << frame.toString() << " state:" << m_canDevice->state();
    queue.append(frame);

    cmd_id = 1;                                         // cmd 1: file payload, 8 bytes per frame
    frameId = (dev_id << 16) | cmd_id;
    for (int i = 0; i < data.size(); i += 8) {
        QCanBusFrame frame = QCanBusFrame(frameId, data.mid(i, 8));
        frame.setExtendedFrameFormat(true);
        queue.append(frame);
    }

    cmd_id = 2;                                         // cmd 2: end-of-file marker (size repeated)
    frameId = (dev_id << 16) | cmd_id;
    frame = QCanBusFrame(frameId, size);
    frame.setExtendedFrameFormat(true);
    queue.append(frame);

    process_frame = false;
    send_timer->start();
}
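Since a dropped frame currently goes unnoticed until the total byte count turns out wrong, one option (a minimal sketch, not part of the code above) is to spend one of the eight payload bytes on a rolling sequence counter, so the receiver can detect a gap immediately:
// Sketch: 7 data bytes + 1 sequence byte per cmd-1 frame.
quint8 seq = 0;
for (int i = 0; i < data.size(); i += 7) {
    QByteArray payload;
    payload.append(char(seq++));      // wraps at 255, enough to spot missing frames
    payload.append(data.mid(i, 7));   // 7 data bytes instead of 8
    QCanBusFrame f(frameId, payload);
    f.setExtendedFrameFormat(true);
    queue.append(f);
}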
void CanMgr::processQueue() {
    if (queue.isEmpty()) {
        qDebug() << "CanMgr::processQueue queue empty";
        return;
    }
    // Only dequeue the next frame once the previous one is confirmed written;
    // otherwise retry the current frame.
    if (!process_frame) {
        curr_frame = queue.dequeue();
        process_frame = true;
    }
    qDebug() << "CanMgr::processQueue frame data:" << curr_frame.toString() << " towrite:" << m_canDevice->framesToWrite() << " left:" << queue.count();
    m_canDevice->writeFrame(curr_frame);
}
void CanMgr::processFramesWritten(qint64 count) {
    qDebug() << "CanMgr::processFramesWritten count:" << count;
    process_frame = false;
    this->processQueue();
}
QString CanMgr::processErrors(QCanBusDevice::CanBusError error) const
{
    QString err_str;
    switch (error) {
    case QCanBusDevice::ReadError:
    case QCanBusDevice::WriteError:
    case QCanBusDevice::ConnectionError:
    case QCanBusDevice::ConfigurationError:
    case QCanBusDevice::UnknownError:
        err_str = m_canDevice->errorString();
        qDebug() << "Error:" << err_str;
        send_timer->start();    // retry the queue after an error
        return err_str;
    default:
        return QString();       // every path must return a value
    }
}
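On the receiving side, one thing worth ruling out (an assumption on my part; the question doesn't say which ESP32 driver is in use) is the driver's RX queue overflowing under a sustained stream of 8-byte frames. With the ESP-IDF TWAI driver, the queue depth is set at install time, for example:
// Hypothetical ESP-IDF TWAI setup; the GPIO numbers are placeholders.
twai_general_config_t g_config = TWAI_GENERAL_CONFIG_DEFAULT(GPIO_NUM_21, GPIO_NUM_22, TWAI_MODE_NORMAL);
g_config.rx_queue_len = 64;   // deepen the RX queue so bursts are not dropped
twai_timing_config_t t_config = TWAI_TIMING_CONFIG_250KBITS();
twai_filter_config_t f_config = TWAI_FILTER_CONFIG_ACCEPT_ALL();
if (twai_driver_install(&g_config, &t_config, &f_config) == ESP_OK) {
    twai_start();
}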
Best,
Marek

Related

Distorted microphone audio when the loudspeaker is enabled (Xamarin.iOS)

I am maintaining a Push-to-Talk VoIP app.
When a PTT call is running, the app creates an audio session:
m_AudioSession = AVAudioSession.SharedInstance();
NSError error;
if (!m_AudioSession.SetCategory(AVAudioSession.CategoryPlayAndRecord, AVAudioSessionCategoryOptions.DefaultToSpeaker | AVAudioSessionCategoryOptions.AllowBluetooth, out error))
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error setting the category");
}
if (!m_AudioSession.SetMode(AVAudioSession.ModeVoiceChat, out error))
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error setting the mode");
}
if (!m_AudioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out error))
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error redirecting the audio to the loudspeaker");
}
if (!m_AudioSession.SetPreferredIOBufferDuration(0.06, out error)) // 60 milliseconds
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error setting the preferred buffer duration");
}
if (!m_AudioSession.SetPreferredSampleRate(8000, out error)) // 8 kHz
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error setting the preferred sample rate");
}
if (!m_AudioSession.SetActive(true, out error))
{
    IOSErrorLogger.Log(DammLoggerLevel.Error, TAG, error, "Error activating the audio session");
}
The received audio is played using the OutputAudioQueue and the microphone audio is captured (as mentioned in the Apple Doc: https://developer.apple.com/documentation/avfaudio/avaudiosession/mode/1616455-voicechat) using a Voice-Processing I/O Unit.
The initialization code for Voice-Processing I/O Unit is:
AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
{
    SampleRate = SAMPLERATE_8000,
    Format = AudioFormatType.LinearPCM,
    FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
    FramesPerPacket = 1,
    ChannelsPerFrame = CHANNELS,
    BitsPerChannel = BITS_X_SAMPLE,
    BytesPerPacket = BYTES_X_SAMPLE,
    BytesPerFrame = BYTES_X_FRAME,
    Reserved = 0
};
AudioComponent audioComp = AudioComponent.FindComponent(AudioTypeOutput.VoiceProcessingIO);
AudioUnit.AudioUnit voiceProcessing = new AudioUnit.AudioUnit(audioComp);
AudioUnitStatus unitStatus = AudioUnitStatus.NoError;
unitStatus = voiceProcessing.SetEnableIO(true, AudioUnitScopeType.Input, ELEM_Mic);
if (unitStatus != AudioUnitStatus.NoError)
{
    DammLogger.Log(DammLoggerLevel.Warn, TAG, "Audio Unit SetEnableIO(true, AudioUnitScopeType.Input, ELEM_Mic) returned: {0}", unitStatus);
}
unitStatus = voiceProcessing.SetEnableIO(true, AudioUnitScopeType.Output, ELEM_Speaker);
if (unitStatus != AudioUnitStatus.NoError)
{
    DammLogger.Log(DammLoggerLevel.Warn, TAG, "Audio Unit SetEnableIO(true, AudioUnitScopeType.Output, ELEM_Speaker) returned: {0}", unitStatus);
}
unitStatus = voiceProcessing.SetFormat(audioFormat, AudioUnitScopeType.Output, ELEM_Mic);
if (unitStatus != AudioUnitStatus.NoError)
{
    DammLogger.Log(DammLoggerLevel.Warn, TAG, "Audio Unit SetFormat (MIC-OUTPUT) returned: {0}", unitStatus);
}
unitStatus = voiceProcessing.SetFormat(audioFormat, AudioUnitScopeType.Input, ELEM_Speaker);
if (unitStatus != AudioUnitStatus.NoError)
{
    DammLogger.Log(DammLoggerLevel.Warn, TAG, "Audio Unit SetFormat (ELEM 0-INPUT) returned: {0}", unitStatus);
}
unitStatus = voiceProcessing.SetRenderCallback(AudioUnit_RenderCallback, AudioUnitScopeType.Input, ELEM_Speaker);
if (unitStatus != AudioUnitStatus.NoError)
{
    DammLogger.Log(DammLoggerLevel.Warn, TAG, "Audio Unit SetRenderCallback returned: {0}", unitStatus);
}
...
voiceProcessing.Initialize();
voiceProcessing.Start();
And the RenderCallback function is:
private AudioUnitStatus AudioUnit_RenderCallback(AudioUnitRenderActionFlags actionFlags, AudioTimeStamp timeStamp, uint busNumber, uint numberFrames, AudioBuffers data)
{
    AudioUnit.AudioUnit voiceProcessing = m_VoiceProcessing;
    if (voiceProcessing != null)
    {
        // getting microphone input signal
        var status = voiceProcessing.Render(ref actionFlags, timeStamp, ELEM_Mic, numberFrames, data);
        if (status != AudioUnitStatus.OK)
        {
            return status;
        }
        if (data.Count > 0)
        {
            unsafe
            {
                short* samples = (short*)data[0].Data.ToPointer();
                for (uint idxSrcFrame = 0; idxSrcFrame < numberFrames; idxSrcFrame++)
                {
                    ... send the collected microphone audio (samples[idxSrcFrame])
                }
            }
        }
    }
    return AudioUnitStatus.NoError;
}
I am facing the problem that if the loudspeaker is enabled via m_AudioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out error),
then the microphone audio is corrupted (sometimes it is impossible to understand the speech).
If the loudspeaker is NOT enabled (AVAudioSessionPortOverride.Speaker is not set), then the audio is very clean.
I have already verified that the NumberChannels in the AudioBuffer returned by the Render function is 1 (mono audio).
Any hint helping to solve the problem is very much appreciated. Thanks
Update:
The AudioUnit_RenderCallback method is called every 32 ms. When the loudspeaker is disabled, the received number of frames is 256, which matches exactly (the sample rate is 8000 Hz). When the loudspeaker is enabled, the received number of frames is 85.
In both cases GetAudioFormat returns the expected values: BitsPerChannel=16, BytesPerFrame=2, FramesPerPacket=1, ChannelsPerFrame=1, SampleRate=8000
Update:
I ended up using the sample rate from the hardware and performing the down-sampling myself. It is my understanding that the Audio Unit should be able to perform the down-sampling (https://developer.apple.com/library/archive/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/AudioUnitHostingFundamentals/AudioUnitHostingFundamentals.html#//apple_ref/doc/uid/TP40009492-CH3-SW11), but it was not possible for me to make it work when the loudspeaker was enabled.
I hope you are testing this on an actual device and not a simulator.
In the code, have you tried using this:
sampleRate = AudioSession.CurrentHardwareSampleRate;
Rather than forcing the sample rate, it's best to check the sample rate from the hardware. It could be that during loudspeaker usage it changes the sample rate, thus creating the issue.
I would suggest recording with the above change, seeing whether the audio improves, and then experimenting with the other flags.
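A minimal sketch of that idea (the hwRate variable is hypothetical; AVAudioSession.SharedInstance().SampleRate reports the current hardware rate):
// Sketch: build the stream description from the hardware rate instead of
// hard-coding 8 kHz, then down-sample to 8 kHz in the render callback.
double hwRate = AVAudioSession.SharedInstance().SampleRate;
AudioStreamBasicDescription audioFormat = new AudioStreamBasicDescription()
{
    SampleRate = hwRate,
    Format = AudioFormatType.LinearPCM,
    FormatFlags = AudioFormatFlags.LinearPCMIsSignedInteger | AudioFormatFlags.LinearPCMIsPacked,
    FramesPerPacket = 1,
    ChannelsPerFrame = 1,   // mono
    BitsPerChannel = 16,
    BytesPerPacket = 2,
    BytesPerFrame = 2,
    Reserved = 0
};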
Standard recording pattern:
https://learn.microsoft.com/en-us/dotnet/api/audiotoolbox.audiostreambasicdescription?view=xamarin-ios-sdk-12#remarks

JUCE - play audio input back

I am learning JUCE and I am writing a program that just reads the input from the audio card and plays it back. Obviously this is just for learning purposes. I am using the audio application template. This is the code inside the getNextAudioBlock() function:
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override
{
    if (true) // this is going to be replaced by checking the value of a button
    {
        const int channel = 0;
        if (true) // this is going to be replaced too
        {
            // Copy the input samples of channel 0 straight to the output.
            const float* inBuffer = bufferToFill.buffer->getReadPointer (channel, bufferToFill.startSample);
            float* outBuffer = bufferToFill.buffer->getWritePointer (channel, bufferToFill.startSample);
            for (int sample = 0; sample < bufferToFill.numSamples; ++sample)
                outBuffer[sample] = inBuffer[sample];
        }
        else
        {
            bufferToFill.buffer->clear (0, bufferToFill.startSample, bufferToFill.numSamples);
        }
    }
    else
    {
        bufferToFill.buffer->clear (0, bufferToFill.startSample, bufferToFill.numSamples);
    }
}
The code is really simple: the content from the input buffer is copied directly to the output buffer. However, I am not hearing anything. What am I doing wrong?
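For what it's worth, two things that commonly cause silence in this setup (assumptions on my part, not confirmed by the question): the AudioAppComponent must actually request input channels via setAudioChannels (e.g. setAudioChannels (2, 2) rather than zero inputs), and copying only channel 0 leaves any other output channels untouched. A sketch that passes every channel through:
// Sketch: copy each input channel in the buffer to the matching output.
for (int ch = 0; ch < bufferToFill.buffer->getNumChannels(); ++ch)
{
    const float* inBuffer = bufferToFill.buffer->getReadPointer (ch, bufferToFill.startSample);
    float* outBuffer = bufferToFill.buffer->getWritePointer (ch, bufferToFill.startSample);
    for (int sample = 0; sample < bufferToFill.numSamples; ++sample)
        outBuffer[sample] = inBuffer[sample];
}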

Visual C++ SerialPort class handling data very slowly

I'm trying to handle data received at about 1 Hz from a robot through a serial port (Bluetooth connection). I will receive data with different headers determining what data will be received and the expected length of the message.
For example:
Header: sensor values (0x32) -> expected length 11 incl. header.
First I want to check whether the byte is a header and, if that's the case, extract the expected length (in bytes).
I am using VC++ and the SerialPort class in the GUI.
The serialPort->Read(buffer, 0, expected_length) call is very slow and not very reliable. I found some tips at http://www.sparxeng.com/blog/software/must-use-net-system-io-ports-serialport but the buffer still builds up to the point where I have about 200 bytes in it, which is not very good when I need live sensor values displayed on the GUI.
The key parts of my code are:
System::Void MyForm::serialPort1_DataReceived_1(System::Object^ sender, System::IO::Ports::SerialDataReceivedEventArgs^ e) {
    if (serialPort1->BytesToRead > 0) {
        if (write_position == 0) {
            // Read the header byte first.
            serialPort1->BaseStream->ReadAsync(data_recieved_buffer, 0, 1);
            header = data_recieved_buffer[0];
            if (this->InvokeRequired) {
                myrecievedata_delegate^ d = gcnew myrecievedata_delegate(&myrecievedata);
                this->Invoke(d, gcnew array<Object^> {'h'});
            }
            else {
                myrecievedata('h');
            }
        }
        else if (this->serialPort1->BytesToRead > expected_length - 1) {
            // Enough bytes for the message body have arrived.
            serialPort1->BaseStream->ReadAsync(data_recieved_buffer, 0, expected_length - 1);
            if (this->InvokeRequired) {
                myrecievedata_delegate^ d = gcnew myrecievedata_delegate(&myrecievedata);
                this->Invoke(d, gcnew array<Object^> {'b'});
            }
            else {
                myrecievedata('b');
            }
        }
        else {
            return;
        }
    }
}
and the received data is sent to:
System::Void MyForm::myrecievedata(char status) {
    if (status == 'h') {
        handleheader(header);
    }
    else if (status == 'b') {
        handlebyte();
    }
}
Is the problem in the SerialPort DataReceived event? I can only think of Invoke (which I have very little knowledge about) being the problem, still keeping the work in the serial port thread.
If that is the case, how would I make sure that the data is handled in a different thread?
Thanks in advance!
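One pattern that may help (a sketch under my own assumptions; rxBuffer and parseMessages are hypothetical): note that BaseStream->ReadAsync returns a Task that the code above never awaits, so data_recieved_buffer may not be filled yet when it is read. Draining everything synchronously into a persistent buffer avoids both that race and the buffer build-up:
// Sketch for the DataReceived handler: read whatever has arrived in one
// call, buffer it, and let a parser extract complete messages.
int avail = serialPort1->BytesToRead;
if (avail > 0) {
    array<System::Byte>^ chunk = gcnew array<System::Byte>(avail);
    int got = serialPort1->Read(chunk, 0, avail);  // synchronous; returns bytes actually read
    for (int i = 0; i < got; ++i)
        rxBuffer->Add(chunk[i]);                   // rxBuffer: a List<Byte>^ member (assumed)
    parseMessages();                               // hypothetical: pops complete <header, body> messages
}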

Multithreaded FTP server socket VC++

I am writing a basic FTP client/server in VC++ with multithreading.
The code works fine with a single client, but it doesn't work for 2 clients. I don't know how the socket keeps information for multiple clients.
If I do CD from one client, it is reflected in the other one too; that is, two different clients cannot work in different server directories.
Maybe it's something very basic I am missing here, due to my lack of knowledge of socket programming.
code to accept connections:
void acceptUserConnections()
{
    calen = sizeof(ca);
    int thread_count = 0;
    thread t[num_threads];
    while (1)
    {
        cout << "Accepting user connections: " << endl;
        if ((cs = accept(serverSocket, &ca.generic, &calen)) == INVALID_SOCKET)
            throw "Couldn't accept connection\n";
        string userAddress(inet_ntoa(ca.ca_in.sin_addr));
        thread_count = thread_count + 1;
        t[thread_count] = thread(handleUserConnection, cs);
        t[thread_count].detach();
        //handleUserConnection(clientSocket);
    }
}
to handle:
void handleUserConnection(SOCKET clientSocket)
{
    if (strcmpi(command.c_str(), "PWD") == 0)
    {
        ftpPWD(clientSocket);
    }
}
for CD
void ftpCD(string directory, SOCKET clientSocket)
{
    memset(szbuffer, '\0', 1024);
    if (!SetCurrentDirectory(directory.c_str()))
    {
        cout << strerror(errno) << endl;
        sprintf(szbuffer, "System cannot find the specified directory.");
        if (send(clientSocket, szbuffer, 1024, 0) == SOCKET_ERROR)
            throw SEND_FAILED_MSG;
    }
    else
    {
        sprintf(szbuffer, "Directory changed successfully.");
        if (send(clientSocket, szbuffer, 1024, 0) == SOCKET_ERROR)
            throw SEND_FAILED_MSG;
    }
}
for PWD
void ftpPWD(SOCKET clientSocket)
{
    memset(szbuffer, '\0', 1024);
    int nBufferLength = GetCurrentDirectory(MAX_PATH, pwd);
    if (!nBufferLength)
    {
        sprintf(szbuffer, "Failed to get current directory");
        if (send(clientSocket, szbuffer, 1024, 0) == SOCKET_ERROR)
            throw SEND_FAILED_MSG;
    }
    else
    {
        sprintf(szbuffer, "%s", pwd);   // don't use the path itself as a format string
        if (send(clientSocket, szbuffer, 1024, 0) == SOCKET_ERROR)
            throw SEND_FAILED_MSG;
    }
}
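A likely cause (my reading, not confirmed above): SetCurrentDirectory and GetCurrentDirectory act on the process-wide current directory, so all client threads share one working directory. A sketch of keeping it per connection instead (the ClientSession type is hypothetical):
// Sketch: keep each client's working directory in its own session object
// and resolve paths against it, instead of the shared process directory.
struct ClientSession
{
    SOCKET socket;
    std::string currentDir;   // this client's working directory
};

void ftpCD(const std::string& directory, ClientSession& session)
{
    // validate the path first (e.g. with GetFileAttributesA), then record it
    // without ever calling SetCurrentDirectory()
    session.currentDir = directory;
}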

Updating a JList with live captured packet info (using jNetPcap) causes list blanking

I am coding an app which captures packets from 2 NICs at the same time, each in its own dedicated thread.
I am using jNetPcap, and I get each captured packet in the JPacketHandler's nextPacket method. I need to show info from the current packet in a JList, but when I simply use a DefaultListModel and call model1.addElement(packetinfo), the JList randomly goes blank.
My code:
new Thread() {
    @Override
    public void run() {
        StringBuilder errbuf = new StringBuilder();  // For any error msgs
        int snaplen = 64 * 1024;                     // Capture all packets, no truncation
        int flags = Pcap.MODE_PROMISCUOUS;           // capture all packets
        int timeout = 10 * 1000;                     // 10 seconds in millis
        Pcap pcap1 =
            Pcap.openLive(Variables.getDevice1().getName(), snaplen, flags, timeout, errbuf);
        if (pcap1 == null) {
            System.err.printf("Error while opening device for capture: "
                + errbuf.toString());
            return;
        }
        PcapPacketHandler<String> jpacketHandler1 = new PcapPacketHandler<String>() {
            int count = 1;
            @Override
            public void nextPacket(PcapPacket packet, String user) {
                // ALL PACKETS FROM DEVICE 1 HERE
                int packetSize = packet.size();
                int packetCount = count++;
                String desc = String.format("No.: %15d | HDRSize : %-4d", packetCount, packetSize);
                device1Model.addElement(desc); // this adds desc to the JList
            }
        };
        pcap1.loop(Pcap.LOOP_INFINITE, jpacketHandler1, "");
        pcap1.close();
    }
}.start();
What do I change to make this smoother while ensuring no packets are lost? I need to catch every packet for the app to function correctly.
Thank you.
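A likely factor (my assumption, not stated above): nextPacket runs on the capture thread, while Swing models such as DefaultListModel may only be mutated on the event dispatch thread; updating them from another thread can corrupt the JList's painting. A minimal sketch of marshaling the update inside nextPacket:
// Sketch: hand the model update to the Swing EDT instead of calling
// device1Model.addElement() from the pcap capture thread.
final String desc = String.format("No.: %15d | HDRSize : %-4d", packetCount, packetSize);
javax.swing.SwingUtilities.invokeLater(new Runnable() {
    @Override
    public void run() {
        device1Model.addElement(desc);
    }
});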
