I modified the circular queue code below for my app.
The queue can hold at most 32 elements, and I have declared the elements as a structure array inside the class. To add an element to the queue you call the CreateElement() function, which looks for a free element and returns its index. When I reuse an element after processing, the following lines in CreateElement() crash:
boost::shared_array<char> tData(new char[bufferSize]);
m_QueueStructure[queueElems].data = tData;
As per the documentation, the assignment operator is supposed to release the previously held array and take ownership of the new one. Why is it crashing? Can someone tell me where I'm going wrong?
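For reference, this is the behaviour I expect from shared_array assignment, checked in isolation (a standalone sketch, not my app code):

#include <boost/smart_ptr/shared_array.hpp>

int main()
{
    boost::shared_array<char> buf(new char[16]);   // first allocation
    buf[0] = 'a';
    buf = boost::shared_array<char>(new char[32]); // the old 16-byte array is deleted here,
                                                   // buf now owns the new 32-byte one
    buf[0] = 'b';
    return 0;                                      // remaining array freed when buf is destroyed
}

The full class is below: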
#include "boost/thread/condition.hpp"
#include "boost/smart_ptr/shared_array.hpp"
#include <queue>
#define MAX_QUEUE_ELEMENTS 32
typedef struct queue_elem
{
bool inUse;
int index;
int packetType;
unsigned long compressedLength;
unsigned long uncompressedLength;
boost::shared_array<char> data;
}Data;
class CQueue
{
private:
int m_CurrentElementsOfQueue;
std::queue<Data> the_queue;
mutable boost::mutex the_mutex;
boost::condition_variable the_condition_variable;
Data m_QueueStructure[MAX_QUEUE_ELEMENTS];
public:
CQueue()
{
m_CurrentElementsOfQueue = 0;
for(int i = 0; i < MAX_QUEUE_ELEMENTS; i++)
{
m_QueueStructure[i].inUse = false;
m_QueueStructure[i].index = i;
}
}
~CQueue()
{
for(int i = 0; i < m_CurrentElementsOfQueue; i++)
{
int index = wait_and_pop();
Data& popped_value = m_QueueStructure[index];
popped_value.inUse = false;
}
m_CurrentElementsOfQueue = 0;
}
void push(Data const& data)
{
boost::mutex::scoped_lock lock(the_mutex);
the_queue.push(data);
lock.unlock();
the_condition_variable.notify_one();
}
bool empty() const
{
boost::mutex::scoped_lock lock(the_mutex);
return the_queue.empty();
}
bool try_pop(Data& popped_value)
{
boost::mutex::scoped_lock lock(the_mutex);
if(the_queue.empty())
{
return false;
}
popped_value=the_queue.front();
the_queue.pop();
return true;
}
int wait_and_pop()
{
boost::mutex::scoped_lock lock(the_mutex);
while(the_queue.empty())
{
the_condition_variable.wait(lock);
}
Data& popped_value=the_queue.front();
the_queue.pop();
return popped_value.index;
}
int CreateElement(int bufferSize, unsigned long _compressedLength,
unsigned long _uncompressedLength, int _packetType) /* Send data length for this function */
{
int queueElems = 0;
if(m_CurrentElementsOfQueue == 32)
{
CCommonException ex(QERROR, QUEUE_FULL, "Circular Buffer Queue is full");
throw ex;
}
for(queueElems = 0; queueElems < MAX_QUEUE_ELEMENTS; queueElems++)
{
if(m_QueueStructure[queueElems].inUse == false)
break;
}
boost::shared_array<char> tData(new char[bufferSize]);
m_QueueStructure[queueElems].data = tData;
m_QueueStructure[queueElems].inUse = true;
m_QueueStructure[queueElems].compressedLength = _compressedLength;
m_QueueStructure[queueElems].uncompressedLength = _uncompressedLength;
m_QueueStructure[queueElems].packetType = _packetType;
m_CurrentElementsOfQueue++;
return queueElems;
}
Data& GetElement(int index)
{
Data& DataElement = m_QueueStructure[index];
return DataElement;
}
void ClearElementIndex(Data& delValue)
{
m_CurrentElementsOfQueue--;
delValue.inUse = false;
}
};
In for(queueElems = 0; queueElems < MAX_QUEUE_ELEMENTS; queueElems++), if no free element is found the loop finishes with queueElems equal to 32, but m_QueueStructure only has 32 elements (indices 0 to 31), so m_QueueStructure[queueElems].data is accessing a 33rd element that doesn't exist. That's the problem.
EDIT: try using m_QueueStructure[queueElems].data.reset(new char[bufferSize]);
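A guard after the search loop also makes the overflow impossible; something along these lines, reusing the names from your code (sketch only):

for(queueElems = 0; queueElems < MAX_QUEUE_ELEMENTS; queueElems++)
{
    if(m_QueueStructure[queueElems].inUse == false)
        break;
}
if(queueElems == MAX_QUEUE_ELEMENTS)
{
    // no free slot found; bail out instead of indexing past the end of the array
    throw CCommonException(QERROR, QUEUE_FULL, "no free element in the circular buffer queue");
}
m_QueueStructure[queueElems].data.reset(new char[bufferSize]);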
Solved the problem. I made two changes. In the wait_and_pop function I was returning an index rather than a Data&; returning a Data& solved the assignment problem. Another crash was happening due to a memset on shared_array.get(). Lesson learnt: never memset a shared_array or a shared_ptr.
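For anyone hitting the same thing, the distinction that bit me, shown as a minimal standalone sketch (not my app code):

#include <boost/smart_ptr/shared_array.hpp>
#include <cstring>

int main()
{
    boost::shared_array<char> data(new char[64]);

    std::memset(data.get(), 0, 64);           // clears the 64-byte buffer the array owns;
                                              // the length must match the allocation

    // std::memset(&data, 0, sizeof(data));   // WRONG: this would wipe the smart-pointer object
                                              // itself (its stored pointer and reference-count
                                              // bookkeeping) rather than the buffer

    return 0;
}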
Can you help me with the code below?
Is it writing the 1 to data memory or to internal memory? There are only 32 internal registers.
The processor is a 32-bit RISC-V based core.
Thanks in advance.
#include "string.h"
#define DEBUG_IF_ADDR 0x00002010
void bubble_sort(int* arr, int len)
{
int sort_num;
do
{
sort_num = 0;
for(int i=0;i<len-1;i++)
{
if(*(arr+i) > *(arr+i+1))
{
int tmp = *(arr+i);
*(arr+i) = *(arr+i+1);
*(arr+i+1) = tmp;
sort_num++;
}
}
}
while(sort_num!=0);
}
int main()
{
int unsorted_arr[] = {195,14,176,103,54,32,128};
int sorted_arr[] = {14,32,54,103,128,176,195};
bubble_sort(unsorted_arr,7);
int *addr_ptr = DEBUG_IF_ADDR;
if(memcmp((char*) sorted_arr, (char*) unsorted_arr, 28) == 0)
{
//success
*addr_ptr = 1;
}
else
{
//failure
*addr_ptr = 0;
}
return 0;
}
I failed to pass the Valgrind tests and couldn't figure out what went wrong with my code. It seems like the issue is in the load() function, since the Valgrind report points at the malloc() line. Could anyone help me take a look? Any guidance would be appreciated. Thank you!
Here is my code:
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>
#include <string.h>
#include <stdlib.h>

#include "dictionary.h"

// Represents a node in a hash table
typedef struct node
{
    char word[LENGTH + 1];
    struct node *next;
}
node;

// TODO: Choose number of buckets in hash table
const unsigned int N = 100;

// Hash table
node *table[N];

int count = 0;

// Returns true if word is in dictionary, else false
bool check(const char *word)
{
    // TODO
    int i = hash(word);
    node *cursor = table[i];
    if (table[i] == NULL)
    {
        return false;
    }
    else
    {
        while (cursor != NULL)
        {
            if (strcasecmp(cursor->word, word) == 0)
            {
                return true;
            }
            else
            {
                cursor = cursor->next;
            }
        }
    }
    return false;
}

// Hashes word to a number
unsigned int hash(const char *word)
{
    // TODO: Improve this hash function
    int bucket;
    if (word[1] != 0)
    {
        bucket = (((toupper(word[0]) - 'A') * (toupper(word[1] - 'A'))) % 10 + (toupper(word[0]) - 'A'));
    }
    else
    {
        bucket = (((toupper(word[0]) - 'A') * (toupper(word[0]) - 'A')) % 10 + (toupper(word[0]) - 'A'));
    }
    return bucket;
}

// Loads dictionary into memory, returning true if successful, else false
bool load(const char *dictionary)
{
    // TODO 1
    // open the dictionary
    FILE *file = fopen(dictionary, "r");
    if (file == NULL)
    {
        printf("Can't load the dictionary\n");
        return false;
    }
    // read strings from the file one at a time
    char word[LENGTH + 1];
    for (int i = 0; i < N; i++)
    {
        table[i] = NULL;
    }
    while (fscanf(file, "%s", word) != EOF)
    {
        node *n = malloc(sizeof(node));
        // create a new node for each word
        if (n == NULL)
        {
            unload();
            return false;
        }
        strcpy(n->word, word);
        n->next = NULL;
        count++;
        char *c = n->word;
        int number = hash(c);
        if (table[number] != NULL)
        {
            // point the new node to the first node existing in the table
            n->next = table[number];
            // point the header to the new node
            table[number] = n;
        }
        else
        {
            //n->next = NULL;
            table[number] = n;
        }
    }
    fclose(file);
    return true;
}

// Returns number of words in dictionary if loaded, else 0 if not yet loaded
unsigned int size(void)
{
    // TODO
    return count;
    //return 0;
}

// Unloads dictionary from memory, returning true if successful, else false
bool unload(void)
{
    for (int i = 0; i > N; i++)
    {
        node *cursor = table[i];
        while (cursor != NULL)
        {
            node *tmp = cursor;
            cursor = cursor->next;
            free(tmp);
        }
        free(cursor);
    }
    // TODO
    return true;
}
Here is what the Valgrind tests show (screenshot of the Valgrind report omitted); the line it flags, c.99, is this one:
node *n = malloc(sizeof(node));
The problem is in unload. It doesn't free any nodes. Review this line carefully and critically; it contains the error:
for (int i = 0; i > N; i++)
The condition i > N is false on the very first iteration, so the loop body never runs and nothing is ever freed, which is exactly the leak Valgrind traces back to the malloc() call in load().
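For completeness, a sketch of unload() with the corrected condition, reusing the node, table and N definitions from your code (the redundant free(cursor) after the inner loop is dropped, since cursor is always NULL there):

// Unloads dictionary from memory, returning true if successful, else false
bool unload(void)
{
    for (int i = 0; i < N; i++)   // '<', not '>': visit every bucket
    {
        node *cursor = table[i];
        while (cursor != NULL)
        {
            node *tmp = cursor;
            cursor = cursor->next;  // advance first, then free the node just left behind
            free(tmp);
        }
    }
    return true;
}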
I tried using malloc on this table: a struct that contains rows, which in turn contain cells. First I allocate the table, then I successfully add and initialize the first row, but when trying to set the cell count on the second row it crashes. I have no idea why; is it some leftover from a previous pointer? Or do I need to allocate memory even for the members of the structs? This is my first time digging into malloc, so sorry if it is something trivial.
typedef struct cell_t
{
    char* content;
    int contentLength;
} cell_t;

typedef struct row_t
{
    cell_t* cells[100];
    int cellCount;
} row_t;

typedef struct table_t
{
    row_t* rows[100];
    int rowCount;
} table_t;

row_t* allocateRow()
{
    row_t* allocatedRow;
    allocatedRow = malloc(sizeof(row_t*));
    if (allocatedRow)
    {
        printf("THIS PRINTS TWICE");
        allocatedRow->cellCount = 0;
        printf("THIS PRINTS ONCE");
    }
    else
    {
        printf("FAILED TO ALLOCATE ROW!");
    }
    return allocatedRow;
}

void addRow(table_t* tableToAddTo, int nToAllocate)
{
    while (tableToAddTo->rowCount < nToAllocate)
    {
        tableToAddTo->rows[tableToAddTo->rowCount] = allocateRow();
        tableToAddTo->rowCount++;
    }
}

int main()
{
    table_t* inputTable = malloc(sizeof(table_t));
    if (inputTable)
    {
        inputTable->rowCount = 0;
    }
    else
    {
        printf("FAILED TO ALLOCATE TABLE!");
        return 1;
    }

    addRow(inputTable, 5);

    for (int i = 0; i < inputTable->rowCount; i++)
    {
        free(inputTable->rows[i]);
    }
    free(inputTable);
    return 0;
}
In allocateRow() change
allocatedRow = malloc(sizeof(row_t*));
into
allocatedRow = malloc(sizeof(row_t));
sizeof(row_t*) is only the size of a pointer (typically 4 or 8 bytes), so the block malloc returns is far too small for a row_t; writing cellCount, which sits after the 100-pointer cells array, lands well past the end of that block and corrupts the heap, which is why the second call crashes at the same statement. You will also need to allocate the individual cells before using them, or change
cell_t* cells[100];
into
cell_t cells[100];
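A sketch of allocateRow() with the first change applied, reusing your types as-is (debug prints dropped):

row_t* allocateRow()
{
    // allocate space for a whole row_t, not just for a row_t* pointer
    row_t* allocatedRow = malloc(sizeof(row_t));
    if (allocatedRow)
    {
        allocatedRow->cellCount = 0;   // safe now: the block really holds a row_t
    }
    else
    {
        printf("FAILED TO ALLOCATE ROW!");
    }
    return allocatedRow;
}

Writing the allocation as malloc(sizeof *allocatedRow) instead of naming the type makes this class of mistake much harder to reintroduce.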
Basically, when the overloaded operator (operator--) is called, my program crashes. Changing the class's array from dynamic to static solves the problem; why is that?
If the class's array is changed from dynamic to static, the overload works great, but that isn't the solution I am looking for.
The working version with the static array:
#include <iostream>
#include <string>
using namespace std;

const int TABLE = 10;

class Player
{
private:
    int health;
    string A[TABLE][TABLE];
public:
    Player()
    {
        health = 17;
        for (int i = 0; i < TABLE; i++)
        {
            for (int j = 0; j < TABLE; j++)
                A[i][j] = "-";
        }
    }
    Player(int new_health)
    {
        health = new_health;
    }
    Player operator--()
    {
        health--;
        return Player(health);
    }
    ~Player() {}
};

int main()
{
    Player p1; // Creates object p1 and calls Player(), initializing the health variable and the string A array.
    --p1;      // Decrements p1's health value
    return 0;
}
Dynamic array version:
#include <iostream>
#include <string>
using namespace std;

const int TABLE = 10;

class Player
{
private:
    int health;
    string **A;
public:
    Player()
    {
        health = 17;
        A = new string*[TABLE];
        for (int i = 0; i < TABLE; i++)
        {
            A[i] = new string[TABLE];
        }
        for (int i = 0; i < TABLE; i++)
        {
            for (int j = 0; j < TABLE; j++)
                A[i][j] = "-";
        }
    }
    Player(int new_health)
    {
        health = new_health;
    }
    Player operator--()
    {
        health--;
        return Player(health);
    }
    ~Player()
    {
        for (int i = 0; i < TABLE; i++)
            delete[] A[i];
        delete[] A;
    }
};

int main()
{
    Player p1; // Creates object p1, calls Player(), sets the health value, initializes the dynamic array
    --p1;      // Produces a crash with exit status -1
    return 0;
}
There are no error messages. The crash doesn't happen at the health = new_health part; it happens as soon as the Player(int new_health) constructor has done its job. How can a dynamic array affect the operator overload and cause a crash?
The error is occurring because the temporary Player instance that you create in your -- overload gets destructed; however, your constructor overload Player(int) never initialized the dynamic array A. So when the destructor is called, you are trying to delete through an uninitialized pointer.
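One minimal way to keep the dynamic-array version from crashing, sticking to the structure of the posted code, is to make sure A is set in every constructor so the destructor never deletes through a garbage pointer; a sketch, showing only the changed members:

Player(int new_health)
{
    health = new_health;
    A = nullptr;              // or NULL before C++11; marks the table as "never allocated"
}

~Player()
{
    if (A)                    // only tear the table down if it was actually allocated
    {
        for (int i = 0; i < TABLE; i++)
            delete[] A[i];
        delete[] A;
    }
}

A copy constructor and copy assignment operator (rule of three) would still be needed before Player objects get copied, and replacing the raw string** with std::vector<std::string> sidesteps the manual delete[] entirely.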
I'm trying to port a command-line program I have to a more visual program with a GUI to make it easier to use. The original code was in C++, so I'm using the Visual C++ that ships with Visual Studio Express 2012, but I have problems understanding the "new" managed C++/CLI way of handling objects. Being new to CLI and managed C++, I was wondering if someone could explain what I am doing wrong and why it doesn't work. Here is a description of the code and the problem.
The program is essentially an optimization program:
There are multiple boxes (modes) in a system; each mode, depending on its type, has a few numerical coefficients that control its behavior and the way it responds to outside excitation.
The program asks the user to specify the number of boxes and the type of each box, then tries to find the numerical coefficients that minimize the difference between the system response and the one obtained experimentally.
So, the UI has means for the user to open the experimental result files, specify the number of modes, and specify the type of each mode. The user can then start the processing function by clicking a start button, which kicks off a background worker.
Following the example given in MSDN, I created a class that performs the work:
ref class curveFit
{
public:
    ref class CurrentState
    {
    public:
        int percentage;
        int iterationNo;
        int stage;
        bool done;
        multimode systemModel;
    };

public:
    int modes;
    int returncode;
    array<double> ^expExcitations;
    array<double> ^expResults;
    multimode systemModel;

private:
    void fcn(int, int, double*, double*, int*);
    double totalError(std::vector<double> &);

public:
    delegate void fcndelegate(int, int, double*, double*, int*);

public:
    curveFit(void);
    curveFit^ fit(System::ComponentModel::BackgroundWorker^, System::ComponentModel::DoWorkEventArgs^, Options^);
};
multimode is just a container class: a list of different boxes.
ref class multimode
{
private:
    Collections::Generic::List<genericBoxModel ^>^ models;
    int modes;
public:
    multimode(void);
    multimode(const multimode%);
    int modeNo(void);
    void Add(genericBoxModel^);
    void Clear();
    genericBoxModel^ operator[](int);
    multimode% operator=(const multimode%);
    double result(double);
    bool isValid();
    std::vector<double> MapData();
    void MapData(std::vector<double> &);
};
multimode::multimode(void)
{
    models = gcnew Collections::Generic::List<genericBoxModel ^>();
    modes = 0;
}

multimode::multimode(const multimode% rhs)
{
    models = gcnew Collections::Generic::List<genericBoxModel ^>();
    for(int ind = 0; ind < rhs.modes; ind++)
        models->Add(rhs.models[ind]);
    modes = rhs.modes;
}

int multimode::modeNo(void)
{
    return modes;
}

void multimode::Add(genericBoxModel^ model)
{
    models->Add(model);
    modes++;
}

void multimode::Clear()
{
    models->Clear();
    modes = 0;
}

genericBoxModel^ multimode::operator[](int ind)
{
    return models[ind];
}

multimode% multimode::operator=(const multimode% rhs)
{
    models->Clear();
    for(int ind = 0; ind < rhs.modes; ind++)
        models->Add(rhs.models[ind]);
    modes = rhs.modes;
    return *this;
}

double multimode::result(double excitation)
{
    double temp = 0.0;
    for(int ind = 0; ind < modes; ind++)
        temp += models[ind]->result(excitation);
    return temp;
}

bool multimode::isValid()
{
    bool isvalid = true;
    if(modes < 1)
        return false;
    for(int ind = 0; ind < modes; ind++)
        isvalid = (isvalid && models[ind]->isValid());
    return isvalid;
}

std::vector<double> multimode::MapData()
{
    //Map the model coefficients to a vector of doubles
    ...
}

void multimode::MapData(std::vector<double> &data)
{
    //Map a vector of doubles to the model coefficients
    ...
}
and genericBoxModel is an abstract class that all box models are based on.
The curveFit::fit function does the optimization based on the options passed to it:
curveFit^ curveFit::fit(System::ComponentModel::BackgroundWorker^ worker, System::ComponentModel::DoWorkEventArgs^ e, Options^ opts)
{
    fcndelegate^ del = gcnew fcndelegate(this, &curveFit::fcn);
    std::vector<double> data;
    CurrentState^ state = gcnew CurrentState;
    state->done = false;
    state->stage = 0;
    state->percentage = 0;
    state->systemModel = systemModel;
    worker->ReportProgress(state->percentage, state);
    switch(opts->optimizationMethod)
    {
    case 0:
        while(iterationNo < maxIterations)
        {
            data = systemModel.MapData();
            OptimizationMethod0::step(some_parameters, data, (optmethods::costfunction)Runtime::InteropServices::Marshal::GetFunctionPointerForDelegate(del).ToPointer());
            systemModel.MapData(data);
            iterationNo++;
            state->percentage = 0;
            state->systemModel = systemModel;
            worker->ReportProgress(state->percentage, state);
        }
        ...
    }
}
I'm passing the system model inside the state so that I can display the results of the
latest step on the screen, which doesn't work, but that is another question :-)
The start button calls the curvefit::fit function after initializing the system model:
private: System::Void btnStart_Click(System::Object^ sender, System::EventArgs^ e) {
    systemModel.Clear();
    for(int mode = 0; mode < modes; mode++)
    {
        switch(model)
        {
        case 0:
            systemModel.Add(gcnew model0);
            systemModel[mode]->coefficients[0] = 100.0 / double(mode + 1);
            ...
            break;
        ...
        }
    }
    btnStart->Enabled = false;
    stStatusText->Text = "Calculating!";
    Application::UseWaitCursor = true;
    curveFit^ cf = gcnew curveFit;
    fitCurve->RunWorkerAsync(cf);
}

private: System::Void fitCurve_DoWork(System::Object^ sender, System::ComponentModel::DoWorkEventArgs^ e) {
    System::ComponentModel::BackgroundWorker^ worker;
    worker = dynamic_cast<System::ComponentModel::BackgroundWorker^>(sender);
    curveFit^ cf = safe_cast<curveFit^>(e->Argument);
    cf->expExcitations = gcnew array<double>(expExcitations.Count);
    expExcitations.CopyTo(cf->expExcitations);
    cf->expResults = gcnew array<double>(expResults.Count);
    expResults.CopyTo(cf->expResults);
    cf->systemModel = systemModel;
    cf->modes = modes;
    e->Result = cf->fit(worker, e, options);
}
This works perfectly! But, in order to make the optimization process faster and more
successful, I wanted to use the results of previous optimizations as the initial guess
for the next run (if possible):
multimode oldmodel(systemModel);
systemModel.Clear();
for(int mode = 0; mode < modes; mode++)
{
    switch(model)
    {
    case 0:
        if(mode < oldmodel.modeNo() && oldmodel.isValid() && (oldmodel[mode]->model == 0))
            systemModel.Add(oldmodel[mode]);
        else
        {
            systemModel.Add(gcnew model0);
            systemModel[mode]->coefficients[0] = 100.0 / double(mode + 1);
            ...
        }
        break;
        ...
Now, my problem is that after this change the values no longer seem to get passed around correctly: the first time the start button is clicked everything works as it should, but from then on, whenever the statement systemModel.Add(oldmodel[mode]); is executed, the results stay equal to the initial guesses and are not updated after the fit function is called.
So, why should these two lines (Add(oldmodel[mode]) and Add(gcnew model0)) give such different results?