Excel File download through web api. Getting corrupt - excel

I am trying to download an Excel file through Web API (using Entity Framework). The download works, but when I try to open the file Excel shows an error dialog saying the file is corrupt.
Web API code as below:
// GET endpoint: streams a stored file back to the caller as an attachment.
//   ID   - primary key of the FileList row holding the file bytes.
//   name - file name reported to the client via Content-Disposition.
// Returns 200 with the file content, 404 when no row matches, 500 on error.
public HttpResponseMessage GetValue(int ID, string name)
{
    try {
        using (DataContext db = new DataContext()) {
            // NOTE: the original snippet was garbled — the LINQ query was
            // missing its where/select clauses, and the list element was
            // "indexed" with parentheses instead of brackets.
            var files = (from c in db.FileList where c.ID == ID select c).ToList();
            if (files.Count == 0) {
                return Request.CreateResponse(HttpStatusCode.NotFound);
            }
            var fileObj = files[0];
            // Wrap the stored bytes in a MemoryStream; StreamContent takes
            // ownership and disposes it once the response has been sent.
            var stream = new MemoryStream(fileObj.File);
            HttpResponseMessage result = new HttpResponseMessage(HttpStatusCode.OK);
            result.Content = new StreamContent(stream);
            result.Content.Headers.ContentType = new MediaTypeHeaderValue(fileObj.FileContentType);
            result.Content.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment") { FileName = name };
            return result;
        }
    } catch (Exception ex) {
        return Request.CreateResponse(HttpStatusCode.InternalServerError);
    }
}
It opens the file with two error dialog and following message.
Excel completed file level validation and repair. Some parts of this workbook may have been repaired or discarded

Trying to solve the same. I compared two EPPlus versions: 4.5.3.3 against 5.2.1. The latter includes code for closing the stream in the GetAsByteArray procedure. So I just added those lines to the 4.5.3.3 version and it worked like a charm. It looks like the stream initially contained some garbage bytes which must be discarded before pumping the file data into that stream.
Tested with the NetCore 3.1 web application. Hope it will solve the issue in your case.
// EPPlus GetAsByteArray save path (patch backported from 5.2.1 into 4.5.3.3):
// reset the target stream before writing the package so stale bytes from an
// earlier write cannot corrupt the generated workbook.
if (save)
{
Workbook.Save();
_package.Close();
/* start of added code */
// Discard any bytes already sitting in the MemoryStream; otherwise the
// package is written after leftover data and Excel reports the file corrupt.
if (_stream is MemoryStream && _stream.Length > 0)
{
CloseStream();
}
/* end of added code */
_package.Save(_stream);
}

I had the same issue, problem is not in the web api code, but it is in the client side code. For me i was using jquery. Following code fixed it for me.
I was creating a blob from the result, which is not required, as the result is already a blob.
window.URL.createObjectURL(result);
Note that I am creating the object URL straight away from the result. Full jQuery code below.
Credit goes to mgracs in here
// POST to <url>/download and save the returned Blob as a browser download.
$.ajax({
  type: 'POST',
  url: url + "/download",
  data: data,
  // Request the response as a Blob so it can be handed straight to
  // URL.createObjectURL without any re-wrapping.
  xhr: () => {
    const request = new XMLHttpRequest();
    request.responseType = 'blob';
    return request;
  },
  success: (result, status, xhr) => {
    // Extract the suggested file name from Content-Disposition, if present.
    let filename = "";
    const disposition = xhr.getResponseHeader('Content-Disposition');
    if (disposition && disposition.indexOf('attachment') !== -1) {
      const filenameRegex = /filename[^;=\n]*=((['"]).*?\2|[^;\n]*)/;
      const matches = filenameRegex.exec(disposition);
      if (matches != null && matches[1]) filename = matches[1].replace(/['"]/g, '');
    }
    // result is already a Blob — build an object URL for it directly and
    // trigger the download through a temporary anchor element.
    const link = document.createElement('a');
    link.href = window.URL.createObjectURL(result);
    link.download = filename;
    link.click();
  },
  error: (a, b) => {
    console.log('Error');
  }
});

Related

GCF "No Such Object" when the Object in question was just created

I'm setting up a Google Cloud Functions (GCF) function that gets triggered often enough that there are multiple instances running at the same time.
I am getting errors from a readStream because the source file of the stream does not exist, yet at this point in my program I've actually just created it.
I've made sure the file exists before the start of the stream by console.log()-ing the file JSON, so the file does actually exist. I've also made sure that the file I'm trying to access has finished being written by a previous stream with an await, but no dice.
EDIT: The code now contains the entire script. The section that seems to be throwing the error is the function columnDelete().
// Dependencies and shared handles.
// NOTE: the package name is "@google-cloud/storage"; the original text had
// "#google-cloud/storage", a markdown/scrape artifact that cannot resolve.
var parse = require('fast-csv');
var Storage = require('@google-cloud/storage');
var Transform = require('readable-stream').Transform;
var storage = new Storage();
var bucket = storage.bucket('<BUCKET>');
// DMS vendors whose name may appear in an uploaded file's name.
const DMSs = ['PBS','CDK','One_Eighty','InfoBahn'];
/**
 * Transform stream (object-mode rows in, CSV text out) that drops the first
 * two columns of each parsed row and re-emits the remainder as one CSV line.
 */
class DeleteColumns extends Transform {
  constructor() {
    super({ objectMode: true });
  }

  _transform(row, enc, done) {
    // slice(2) copies everything after the first two columns. Unlike the
    // original manual copy into `new Array(row.length - 2)`, it cannot throw
    // a RangeError when a malformed row has fewer than two columns — it
    // simply yields an empty line for such rows.
    this.push(row.slice(2).toString() + '\n');
    done();
  }
}
// Reads the first data row of `originalFile` (a GCS File), takes the dealer
// name and date from its first two columns, and copies the object to
// "<dealer> <date>_<DMS>temp.csv" plus the same name without "temp".
// Resolves with the temp name, or 'Not Renamed' when the trigger was caused
// by the rename itself.
// NOTE(review): copy() is fired without awaiting its callback, so the copies
// may still be in flight when this promise resolves — this is the race that
// produces "No Such Object" downstream (see the accepted fix below).
function rename(file, originalFile, DMS){
return new Promise((resolve, reject) => {
var dealer;
var date;
var header = true;
var parser = parse({delimiter : ",", quote:'\\'});
//for each row of data
var stream = originalFile.createReadStream();
stream.pipe(parser)
.on('data', (row)=>{
//if this is the first line do nothing
if(header){
header = false;
}
//otherwise record the contents of the first two columns and then destroy the stream
else {
dealer = row[0].toString().replace('"', '').replace('"', '');
date = row[1].toString().replace('"', '').replace('"', '');
// end the source stream early; the 'finish' handler below runs once
// piping stops, so only the first data row is ever inspected
stream.end();
}
})
.on('finish', function(){
var newName = dealer + ' ' + date + '_' + DMS + 'temp.csv';
//if this was not triggered by the renaming of a file
if(!file.name.includes(dealer)&&!file.name.includes(':')){
console.log('Renamed ' + file.name);
originalFile.copy(newName);
originalFile.copy(newName.replace('temp',''));
}else{
newName = 'Not Renamed';
console.log('Oops, triggered by the rename');
}
resolve(newName);
});
});
}
// Streams the temp object through the CSV parser and DeleteColumns, writing
// the result to the same name without "temp"; deletes the temp object and
// resolves once the write stream finishes.
// NOTE(review): reject is never invoked, so a stream error leaves the
// promise pending forever — confirm whether the caller relies on that.
function columnDelete(fileName){
return new Promise((resolve, reject) =>{
console.log('Deleting Columns...');
console.log(bucket.file(fileName));
var parser = parse({delimiter : ",", quote:'\\'});
var del = new DeleteColumns();
var temp = bucket.file(fileName);
var final = bucket.file(fileName.replace('temp', ''));
//for each row of data
temp.createReadStream()
//parse the csv
.pipe(parser)
//delete first two columns
.pipe(del)
//write to new file
.pipe(final.createWriteStream()
.on('finish', function(){
console.log('Columns Deleted');
temp.delete();
resolve();
})
);
});
}
exports.triggerRename = async(data, context) => {
var DMS = 'Triple';
var file = data;
//if not a temporary file
if(!file.name.includes('temp')){
//create a new File object from the name of the data passed
const originalFile = bucket.file(file.name);
//identify which database this data is from
DMSs.forEach(function(database){
if(file.name.includes(database)){
DMS = database;
}
});
//rename the file
var tempName = await rename(file, originalFile, DMS);
//if it was renamed, delete the extra columns
if (!tempName.includes('Not Renamed')){
await columnDelete(tempName);
}
} else if(file.name.includes('undefined')){
console.log(file.name + ' is invalid. Deleted.');
bucket.file(file.name).delete();
}
else {
console.log( file.name + ' is a temporary file. Did not rename.');
}
};
What I expect to be output is as below:
Deleting Columns...
Columns Deleted
Nice and simple, letting us know when it has started and finished.
However, I get this instead:
Deleting Columns...
ApiError: No such object: <file> at at Object.parseHttpRespMessage(......)
finished with status: 'crash'
Which is not wanted for obvious reasons. My next thought is to make sure that the file hasn't been deleted by another instance of the script midway through, but to do that I would have to check to see if the file is being used by another stream, which is, to my knowledge, not possible.
Any ideas out there?
When I was creating the file I called the asynchronous function copy() and moved on, meaning that when trying to access the file it was not finished copying. Unknown to me, the File Object is a reference variable, and did not actually contain the file itself. While the file was copying, the pointer was present but it was pointing to an unfinished file.
Thus, "No Such Object". To fix this, I simply used a callback to make sure that the copying was finished before I was accessing the file.
Thanks to Doug Stevenson for letting me know about the pointer!

Using socketio-file-upload to upload multiple files

I'm using NodeJS with socket.io and socketio-file-upload to upload multiple files, and it works great! However, I'm having an issue where I'm trying to save the name attribute of the input these files come from, so I can store them in my DB.
When I upload 1 or more files, I can't seem to access the input field name or something that shows me which of the files come from which input field.
Here is my front:
// Front end: create the socket uploader and hook the level-3 submit button
// so every document input is uploaded over the socket in one action.
var uploader = new SocketIOFileUpload(socket);
var level3InputIds = [
  "l3_id_front",
  "l3_id_back",
  "l3_address_proof_1",
  "l3_address_proof_2",
  "l3_passport"
];
var array_files_lvl_3 = level3InputIds.map(function (inputId) {
  return document.getElementById(inputId);
});
uploader.listenOnArraySubmit(document.getElementById("save_level_3"), array_files_lvl_3);
And here is my back:
// Back end: accept incoming uploads into uploads/userL3 and log each save.
var uploader = new siofu();
uploader.dir = "uploads/userL3";
uploader.listen(socket);
uploader.on('saved', function (evnt) {
  // the event object carries a lot of information, but nothing that
  // identifies the input element the file came from.
  console.log(evnt);
});
This is what the "evnt" variable holds:
Unfortunately the library doesn't send that information. So there is nothing existing config you can do. So this needs code modification.
client.js:374
// client.js:374 — patched file-select handler: capture the source element so
// it can be attached as per-file metadata below. (Snippet is abridged in the
// original post; the closing brace is not shown.)
var _fileSelectCallback = function (event) {
var files = event.target.files || event.dataTransfer.files;
event.preventDefault();
// Remember which <input> (or drop target) supplied the files.
var source = event.target;
_baseFileSelectCallback(files, source);
client.js:343
// client.js:343 — patched: attach a meta object to every selected file
// recording the id/name of the element it came from, so the server-side
// 'saved' event can tell which input produced each file. (Snippet is
// abridged in the original post; the closing brace is not shown.)
var _baseFileSelectCallback = function (files, source) {
if (files.length === 0) return;
// Ensure existence of meta property on each file
for (var i = 0; i < files.length; i++) {
if (source) {
if (!files[i].meta) files[i].meta = {
sourceElementId: source.id || "",
sourceElementName: source.name || ""
};
} else {
if (!files[i].meta) files[i].meta = {};
}
}
After these changes I am able to get the details in event.file.meta
I'm the author of socketio-file-upload.
It looks like the specific input field is not currently being recorded, but this would not be a hard feature to add. Someone opened a new issue and left a backpointer to this SO question.
A workaround would be to directly use submitFiles instead of listenOnArraySubmit. Something like this might work (untested):
// add a manual listener on your submit button
// Workaround: bypass listenOnArraySubmit and submit each input's files
// manually, tagging every file with the index of its source input
// (readable later via file.meta on the server side).
document.getElementById("save_level_3").addEventListener("click", () => {
  array_files_lvl_3.forEach((element, index) => {
    const files = element.files;
    for (const file of files) {
      file.meta = { index };
    }
    uploader.submitFiles(files);
  });
});

Extjs and Spring success handler

I'm building web application with ExtJs and Spring. With ExtJs I'm uploading excel file and with spring I parse it. Everything works fine, although my success handler in ExtJs code doesn't work. I read many examples about that, but I still can't solve this problem. I hope you guys will help me.
This is ExtJs upload file function:
uploadFile: function(value, fld){
var me = this;
var id = this.getTipasCombobox().getValue();
var record = this.getTipasCombobox().getStore().getById(id);
var timeId = this.getLaikotarpiaiCombo().getValue();
var timeRecord = this.getLaikotarpiaiCombo().getStore().getById(timeId);
var fp = this.getUploadBtn().up('form').getForm();
if(fp.isValid()){
fp.submit({
url: Turtas.Properties.getServicePath()+'/save/' + record.data.resource,
waitMsg: 'Failas yra įkeliamas, prašome palaukti...',
success: function(){
console.log("Upload completed");
me.storeSelection(record.data.resource, timeRecord.data.open, timeRecord.data.year, true, me.restrictedPage())
}
})
}
},
And this is my controller function which I use to parse Excel:
#RequestMapping(value="turtas/save/gelezinkeliai", method=RequestMethod.POST)
public String saveGelezinkeliaiFromExcel(HttpServletResponse response, #RequestParam("file") MultipartFile file){
if (file.getSize() != 0){
ReadExcelFileToList excelFile = new ReadExcelFileToList();
List<Gelezinkeliai> gelezinkeliai = new ArrayList<Gelezinkeliai>();
gelezinkeliai = excelFile.readExcelData(/*fileName*/ file);
gelezinkeliai.remove(0);
Laikotarpis laikotarpis = new Laikotarpis();
laikotarpis = laikotarpiaiService.findById(2014);
for (GelezinkeliaiRest delGelezinkelis : service.getItems(2014, true, false)){
service.delete(delGelezinkelis);
}
List<Gelezinkeliai> finalGelezinkeliai = new ArrayList<Gelezinkeliai>();
for (Gelezinkeliai gelezinkelis : gelezinkeliai){
gelezinkelis.setLaikotarpis(laikotarpis);
finalGelezinkeliai.add(gelezinkelis);
GelezinkeliaiRest gelezinkeliaiRest = new GelezinkeliaiRest();
gelezinkeliaiRest.fromGelezinkeliai(gelezinkelis);
service.save(gelezinkeliaiRest, false);
}
response.setContentType("text/html");
return "{\"success\":true}";
}
else {
return "{\"success\":false}";
}
}
Response content-type is text/html and response is:
{"success":true}
If I manually reload the page or the store, the data is updated, so the functions work properly, but I want to catch the event when the request ends, to reload the store automatically. I have no idea what's wrong. Thanks for responses in advance!

Persist data on disk using chrome extension API

I am trying to save some data which should be available even when restart the browser So this data should persist. I am using Chrome Storage Sync API for this. But when I am restarting my browser, I get empty object on using chrome.storage.get.
Here is my sample code:
// Persist the in-memory task list under the 'taskListStore' key; when the
// write completes, fire the pending success callback exactly once.
SW.methods.saveTaskListStore = function () {
  var payload = { 'taskListStore': SW.stores.taskListStore };
  chrome.storage.sync.set(payload, function () {
    var notify = SW.callbacks.watchProcessSuccessCallback;
    if (notify) {
      notify(SW.messages.INFO_DATA_SAVED);
      // Clear the callback so it only runs for this save.
      SW.callbacks.watchProcessSuccessCallback = null;
    }
  });
};
// Loads the persisted task list back into SW.stores.taskListStore.
// Fixes (per the accepted answer): chrome.storage.sync.get returns an object
// keyed by the requested name, so the value lives at items.taskListStore —
// the original read taskFeed.tasks (always undefined). The original also
// only assigned when the list was EMPTY (`!tasks.length`) and wrote to
// SW.stores.loadTaskListStore, a key nothing else (e.g. saveTaskListStore)
// uses.
SW.methods.loadTaskListStore = function() {
  SW.stores.taskListStore = [];
  chrome.storage.sync.get('taskListStore', function(items) {
    var tasks = items.taskListStore;
    if (tasks && tasks.length) {
      SW.stores.taskListStore = tasks;
    }
  });
};
I guess I am using the Wrong API.
If this is not some copy-paste error, you are storing under key taskListStore and trying to get data under key loadTaskListStore.
Besides that, according to the documentation on StorageArea.get(), the result object is an object with items in their key-value mappings. Thus, in your case, you should do:
// Corrected retrieval: the get() result is an object keyed by the requested
// name, so the stored value is items.taskListStore.
chrome.storage.sync.get("taskListStore", function(items) {
if (items.taskListStore) {
var tasks = items.taskListStore.tasks;
... // (snippet abridged in the original answer)

chrome extension connection issues

I have written an extension for google chrome and I have a bug I need a help solving.
what I do is using either a text selection or an input of text search for photos on flickr and then create a results tab.
The extension works most of the times. but sometimes it creates a blank tab with no results and when I repeat the same search it then shows results. I figured that it's something to do with the html files messaging maybe something to do with them communicating. I have to say that I always receive the results from flickr so that the request/responce with flickr works ok. Sometimes the error happens when I play with other tabs or do something on other tabs while waiting for results. can you please help me figure out where's the fault?
the background file:
// Context-menu handler: searches Flickr for the selected text (or a prompted
// value), then opens a results tab and posts the photo list to it.
// NOTE(review): the Flickr API key is embedded in the query string — it is
// visible to anyone inspecting the extension; consider moving it server-side.
function searchSelection(info,tab){
var updated;
// No selection means the menu was clicked on the page itself — ask the user.
if(info.selectionText==null){
var value = prompt("Search Flickr", "Type in the value to search");
updated=makeNewString(value);
}
else{
updated=makeNewString(info.selectionText);
}
var resultHtml;
var xhReq = new XMLHttpRequest();
xhReq.open(
"GET",
"http://api.flickr.com/services/rest/?method=flickr.photos.search&text="+updated+
"&api_key=a0a60c4e0ed00af8d70800b0987cae70&content_type=7&sort=relevance&per_page=500",
true);
xhReq.onreadystatechange = function () {
if (xhReq.readyState == 4) {
if (xhReq.status == 200) {
// Response received — restore the normal cursor on the originating tab.
chrome.tabs.executeScript(tab.id, {code:"document.body.style.cursor='auto';"});
var photos = xhReq.responseXML.getElementsByTagName("photo");
if(photos.length==0){
alert("No results found for this selection");
chrome.tabs.executeScript(tab.id, {code:"document.body.style.cursor='auto';"});
return;
}
// Copy the XML attributes into plain objects so they can be posted
// over the tab port.
var myJSPhotos=[];
for(var i=0; i<photos.length; i++){
var data={"id":photos[i].getAttribute("id"),"owner":photos[i].getAttribute("owner"),
"secret":photos[i].getAttribute("secret"),"server":photos[i].getAttribute("server"),
"farm":photos[i].getAttribute("farm"),"title":photos[i].getAttribute("title")};
myJSPhotos[i]=data;
}
// NOTE(review): this callback fires before the new tab's scripts have
// loaded, so the message can be lost — the intermittent blank-tab bug
// discussed below.
chrome.tabs.create({"url":"results.html"},function(thistab){
var port= chrome.tabs.connect(thistab.id);
port.postMessage({photos:myJSPhotos});
});
}
};
};
xhReq.send(null);
// Show a wait cursor on the originating tab while the search is in flight.
chrome.tabs.executeScript(tab.id, {code:"document.body.style.cursor='wait';"});
}
// Register the context-menu entry that triggers the search; shown both for
// text selections and for plain page clicks.
var context="selection";
var id = chrome.contextMenus.create({"title": "search Flickr", "contexts":[context,'page'],"onclick":searchSelection});
results html: has only a reference to the js file res.js
res.js :
// results page (res.js): wait for the background page to connect, receive
// the photo list, and build the page from it.
chrome.extension.onConnect.addListener(function (port) {
  port.onMessage.addListener(function (msg) {
    //*****//
    createPage(msg.photos);
  });
});
I have to mention that when the tab is empty if I put alert on the //*****// part it won't
fire.
but when I print out the photos.length at the tab create call back function part it prints out the correct result.
Try to set "run_at":"document_start" option for your res.js in the manifest.
I think callback from chrome.tabs.create is fired right away without waiting for page scripts to be loaded, so you might try something like this instead:
// Suggested fix: don't postMessage from the tabs.create callback (the new
// page's scripts may not have loaded yet); instead remember the tab id and
// send once tabs.onUpdated reports the load is complete.
//global vars
var createdTabId = null;
var myJSPhotos = null;
xhReq.onreadystatechange = function () {
//assign myJSPhotos to a global var
chrome.tabs.create({"url":"results.html"},function(thistab){
createdTabId = thistab.id;
});
}
chrome.tabs.onUpdated.addListener(function(tabId, changeInfo, tab) {
// Only react to the results tab we created, and only once it has loaded.
if(changeInfo.status == "complete" && tab.id == createdTabId) {
createdTabId = null;
//now page is loaded and content scripts injected
var port = chrome.tabs.connect(tab.id);
port.postMessage({photos:myJSPhotos});
}
});

Resources