I am trying to send 6 Excel sheets as attachments in one email from a single Google Sheet. Sometimes this code runs perfectly, but sometimes it gives the error below.
The Original Code
///*** SEND 6 X ATTACHMENTS IN 1 EMAIL FROM GOOGLE SHEET USING APP SCRIPT ***///
function sendExcelAttachmentsInOneEmail() {
var url = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet1").getSheetId();
var url1 = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet2").getSheetId();
var url2 = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet3").getSheetId();
var url3 = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet4").getSheetId();
var url4 = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet5").getSheetId();
var url5 = "https://docs.google.com/spreadsheets/d/"+SpreadsheetApp.getActiveSpreadsheet().getId()+"/export"+"?format=xlsx&"+"gid="+SpreadsheetApp.getActiveSpreadsheet().getSheetByName("Sheet6").getSheetId();
var ss3 = SpreadsheetApp.getActiveSpreadsheet();
var sheet3 = ss3.getSheetByName("Sheet7");
var value4 = sheet3.getRange("J1").getValue();
var params = {method:"GET",headers:{"authorization":"Bearer "+ ScriptApp.getOAuthToken()}};
var blob = UrlFetchApp.fetch(url, params).getBlob().setName("Sheet1 XYZ Reports.xlsx");
var blob1 = UrlFetchApp.fetch(url1, params).getBlob().setName("Sheet2 TXT Data.xlsx");
var blob2 = UrlFetchApp.fetch(url2, params).getBlob().setName("Sheet3 RAW FILES.xlsx");
var blob3 = UrlFetchApp.fetch(url3, params).getBlob().setName("Sheet4 SYS DATA.xlsx");
var blob4 = UrlFetchApp.fetch(url4, params).getBlob().setName("Sheet5 REPORTED ISSUES.xlsx");
var blob5 = UrlFetchApp.fetch(url5, params).getBlob().setName("Sheet6 FIXED ISSUES.xlsx");
var message = {
to: "email#domain.com",
cc: "email#domain.com",
subject: "REPORTS - "+value4,
body: "Hi Team,\n\nPlease find attached Reprots.\n\nBest Regards!",
name: "",
attachments: [blob, blob1, blob2, blob3, blob4, blob5]
}
MailApp.sendEmail(message);
}
Error Message from Apps Script
9:05:45 PM Notice Execution started
9:05:49 PM Error Exception: Request failed for https://docs.google.com returned code 429. Truncated server response: <!DOCTYPE html><html lang="en"><head><meta name="description" content="Web word processing, presentations and spreadsheets"><meta name="viewport" c... (use muteHttpExceptions option to examine full response)
sendExcelAttachmentsInOneEmail @ Code.gs:44
I got it working and also overhauled the script for personal reasons (I don't like repetitive lines of code). See the working script below.
Script:
function sendExcelAttachmentsInOneEmail() {
var sheets = ['Sheet1', 'Sheet2', 'Sheet3', 'Sheet4', 'Sheet5', 'Sheet6'];
var spreadSheet = SpreadsheetApp.getActiveSpreadsheet();
var spreadSheetId = spreadSheet.getId();
var urls = sheets.map(sheet => {
var sheetId = spreadSheet.getSheetByName(sheet).getSheetId();
// Used this alternative url since I'm getting the error below. This other url has less traffic issues during testing:
// "This file might be unavailable right now due to heavy traffic. Try again."
return `https://docs.google.com/feeds/download/spreadsheets/Export?key=${spreadSheetId}&gid=${sheetId}&exportFormat=xlsx`;
});
var reportName = spreadSheet.getSheetByName('Sheet7').getRange(1, 10).getValue();
var params = {
method: 'GET',
headers: {
'Authorization': 'Bearer ' + ScriptApp.getOAuthToken()
},
// Add mute http exceptions to proceed
muteHttpExceptions: true
};
var fileNames = ['Sheet1 XYZ Reports.xlsx',
'Sheet2 TXT Data.xlsx',
'Sheet3 RAW FILES.xlsx',
'Sheet4 SYS DATA.xlsx',
'Sheet5 REPORTED ISSUES.xlsx',
'Sheet6 FIXED ISSUES.xlsx'];
var blobs = urls.map((url, index) => {
// Added an interval due to heavy traffic error. Increase interval if needed.
Utilities.sleep(1000);
return UrlFetchApp.fetch(url, params).getBlob().setName(fileNames[index]);
});
var message = {
to: 'user@domain.com',
subject: 'REPORTS - ' + reportName,
body: "Hi Team,\n\nPlease find attached Reports.\n\nBest Regards!",
attachments: blobs
}
MailApp.sendEmail(message);
}
Notes:
Important bits here are the following:
muteHttpExceptions
The alternative url for less traffic issues
Utilities.sleep(1000)
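If the export still occasionally fails with the 429 / "heavy traffic" error despite the fixed delay, a retry helper with a growing back-off is a natural extension. The sketch below is not part of the original answer; it assumes params keeps muteHttpExceptions: true so the response code can be inspected instead of throwing:
function fetchWithRetry(url, params, maxAttempts) {
  for (var attempt = 1; attempt <= maxAttempts; attempt++) {
    var response = UrlFetchApp.fetch(url, params);
    // With muteHttpExceptions: true, a 429 comes back as a normal response.
    if (response.getResponseCode() === 200) {
      return response.getBlob();
    }
    // Back off before the next attempt: 1s, 2s, 4s, ...
    Utilities.sleep(1000 * Math.pow(2, attempt - 1));
  }
  throw new Error('Export still failing after ' + maxAttempts + ' attempts: ' + url);
}
It would replace the direct fetch inside the map, e.g. return fetchWithRetry(url, params, 3).setName(fileNames[index]);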
Related
I am transferring an image (base64 encoded, created with MapGuide server) to the client. I am able to output the image to the console and confirm it is correct. I am using Node with npm and Vite for the development web server. When I try to set imgLegend.src = data; I get the error "431 (Request Header Fields Too Large)". I believe the Node default max-http-header-size is causing the problem. I have attempted to set --max-http-header-size=80000 with no luck. I am starting my dev server in the package.json file like this: "start": "vite --host 0.0.0.0".
Does anyone know of a way around this, or a better way to transfer the image from server to client?
Here is the relevant code.
Client side:
//add legend
const mapVpHeight = document.getElementById('map').clientHeight;
var url = mgServer + "/Cid_Map/LayerManager.aspx/GetLegendImage";
var values = JSON.stringify({ sessionId: sessionId, mgMapName: mapName, mapVpHeight: mapVpHeight });
var imgLegend = new Image();
//console.log(values);
$.ajax({
url: url,
type: "POST",
contentType: "application/json; charset=utf-8",
data: values,
dataType: 'html',
success: function (data) {
console.log(data); //
imgLegend.src = data; //node.js won't allow http header as large as this image, about 18kb
},
error: function (xhr, textStatus, error) {
console.log(textStatus);
}
});
Server Side:
[WebMethod]
public static string GetLegendImage(string sessionId, string mgMapName, int mapVpHeight)
{
string tempDir = System.Configuration.ConfigurationManager.AppSettings["tempDir"];
string legFilePath = tempDir + sessionId + "Legend.png";
string configPath = @"C:\Program Files\OSGeo\MapGuide\Web\www\webconfig.ini";
MapGuideApi.MgInitializeWebTier(configPath);
MgUserInformation userInfo = new MgUserInformation(sessionId);
MgSiteConnection siteConnection = new MgSiteConnection();
siteConnection.Open(userInfo);
MgMap map = new MgMap(siteConnection);
MgResourceService resourceService = (MgResourceService)siteConnection.CreateService(MgServiceType.ResourceService);
map.Open(resourceService, mgMapName);
MgColor color = new MgColor(226, 226, 226);
MgRenderingService renderingService = (MgRenderingService)siteConnection.CreateService(MgServiceType.RenderingService);
MgByteReader byteReader = renderingService.RenderMapLegend(map, 200, mapVpHeight, color, "PNG");
MgByteSink byteSink = new MgByteSink(byteReader);
byteSink.ToFile(legFilePath);
//try this
//byte[] buffer = new byte[byteReader.GetLength()]; //something doesn't work here, byteReader doesn't give the complete image
//byteReader.Read(buffer, buffer.Length);
//loading image file just created, converting to base64 image gives correct image
string legendImageURL = "";
using (Stream fs = File.OpenRead(legFilePath))
{
BinaryReader br = new System.IO.BinaryReader(fs);
byte[] bytes = br.ReadBytes((int)fs.Length);
string strLegendImage = Convert.ToBase64String(bytes, 0, bytes.Length);
legendImageURL = "data:image/png;base64," + strLegendImage;
}
byteReader.Dispose();
byteSink.Dispose();
return legendImageURL;
//return buffer;
}
The 431 status code complains about the header length of your request.
Trace the request in your browser's dev tools network tab and study the header fields of your request. In some special cases, if your cookies get set too often with unique key/value pairs, this could be the problem.
Maybe you can copy and share the request and response from your browser's network tab to provide some detailed information, especially for the endpoint in question, and look at the cookie/session storage to see if you find anything suspicious.
Good luck :)
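If the Node header-size limit really is the culprit here, one way to reach the Vite dev server with that flag (Vite itself does not accept it) is through NODE_OPTIONS. This is only a sketch, assuming a Unix-like shell and a Node version that allows --max-http-header-size inside NODE_OPTIONS; the value 80000 is taken from the question:
"scripts": {
  "start": "NODE_OPTIONS=--max-http-header-size=80000 vite --host 0.0.0.0"
}
On Windows, setting the variable inline like this does not work in cmd, so a helper such as cross-env would be needed.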
I have been following the documentation on Script Based Authentication for Damn Vulnerable Web Application using ZAP. I navigated to http://localhost/dvwa/login.php through Manual Explore, which opens the DVWA application on my localhost and adds the URL to the Default Context.
I have also created the dvwa script and modified it as described in the documentation.
Now when I try Configure Context Authentication, the dvwa script does get loaded, but the CSRF Field doesn't show up. Additionally, POST Data doesn't show up either, although Extra POST Data is shown.
Am I missing something in the steps? Can someone help me out?
The modified script within the Script Based Authentication documentation for Damn Vulnerable Web Application using ZAP seems incomplete.
The complete script is available at Setting up ZAP to Test Damn Vulnerable Web App (DVWA), and is as follows:
function authenticate(helper, paramsValues, credentials) {
var loginUrl = paramsValues.get("Login URL");
var csrfTokenName = paramsValues.get("CSRF Field");
var csrfTokenValue = extractInputFieldValue(getPageContent(helper, loginUrl), csrfTokenName);
var postData = paramsValues.get("POST Data");
postData = postData.replace('{%username%}', encodeURIComponent(credentials.getParam("Username")));
postData = postData.replace('{%password%}', encodeURIComponent(credentials.getParam("Password")));
postData = postData.replace('{%' + csrfTokenName + '%}', encodeURIComponent(csrfTokenValue));
var msg = sendAndReceive(helper, loginUrl, postData);
return msg;
}
function getRequiredParamsNames() {
return [ "Login URL", "CSRF Field", "POST Data" ];
}
function getOptionalParamsNames() {
return [];
}
function getCredentialsParamsNames() {
return [ "Username", "Password" ];
}
function getPageContent(helper, url) {
var msg = sendAndReceive(helper, url);
return msg.getResponseBody().toString();
}
function sendAndReceive(helper, url, postData) {
var msg = helper.prepareMessage();
var method = "GET";
if (postData) {
method = "POST";
msg.setRequestBody(postData);
}
var requestUri = new org.apache.commons.httpclient.URI(url, true);
var requestHeader = new org.parosproxy.paros.network.HttpRequestHeader(method, requestUri, "HTTP/1.0");
msg.setRequestHeader(requestHeader);
helper.sendAndReceive(msg);
return msg;
}
function extractInputFieldValue(page, fieldName) {
// Rhino:
var src = new net.htmlparser.jericho.Source(page);
// Nashorn:
// var Source = Java.type("net.htmlparser.jericho.Source");
// var src = new Source(page);
var it = src.getAllElements('input').iterator();
while (it.hasNext()) {
var element = it.next();
if (element.getAttributeValue('name') == fieldName) {
return element.getAttributeValue('value');
}
}
return '';
}
Using this script, the CSRF Field and POST Data fields show up just fine.
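For reference, the Authentication parameters for DVWA would typically be filled in along these lines; the field names are assumptions based on DVWA's standard login form (username, password, Login, user_token), so verify them against your install:
Login URL:  http://localhost/dvwa/login.php
CSRF Field: user_token
POST Data:  username={%username%}&password={%password%}&Login=Login&user_token={%user_token%}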
I am trying to convert files that I copy from my email to a shared drive. The files are saved as .xlsx, and I lose information if I try to download them from the source as .csv files.
function convertExceltoGoogleSpreadsheet(fileName) {
try {
fileName = "JNJ Defects Last Shift.xlsx";
var excelFile = DriveApp.getFilesByName(fileName).next();
var fileId = excelFile.getId();
var folderId = "0AEZiKNnbsme8Uk9PVA";
var blob = excelFile.getBlob();
//var resource = {
//title: excelFile.getName(),
//mimeType: MimeType.GOOGLE_SHEETS,
//parents: [{id: folderId}],
//};
blob.setContentType("application/vnd.google-apps.spreadsheet").setName("JNJ Defects Last Shift 22 Jun 20");
DriveApp.createFile(blob);
//Drive.Files.insert(resource, blob);
} catch (f) {
Logger.log(f.toString());
}
}
When I run it, I get this error message:
[20-06-23 07:06:08:638 CDT] Exception: Invalid argument: file.contentType.
I have tried a couple of variations of the contentType but did not manage to convert to sheets.
How can I do this?
Answer:
You can do this with the Advanced Drive Service.
More Information:
In order to do the conversion, you need to explicitly specify that you wish to convert the file; as this defaults to the Google Sheets format, you do not need to manually specify the MimeType.
Code:
function convertExceltoGoogleSpreadsheet(fileName) {
fileName = "JNJ Defects Last Shift.xlsx";
var excelFile = DriveApp.getFilesByName(fileName).next();
var fileId = excelFile.getId();
var folderId = "0AEZiKNnbsme8Uk9PVA";
var blob = excelFile.getBlob();
var file = {
title: "JNJ Defects Last Shift 22 Jun 20",
parents: [
{
"kind": "drive#parentReference",
"id": folderId
}
]
};
file = Drive.Files.insert(file, blob, {
convert: true
});
}
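Note that the Advanced Drive Service must be enabled for the project before Drive.Files.insert is available. If you add the advanced service as Drive API v3 instead of v2, a roughly equivalent sketch (untested, folder ID is a placeholder) would request the conversion by setting the target MIME type on create, since v3 has no convert flag:
function convertExcelToGoogleSheetV3(fileName) {
  fileName = fileName || 'JNJ Defects Last Shift.xlsx';
  var excelFile = DriveApp.getFilesByName(fileName).next();
  var blob = excelFile.getBlob();
  var folderId = 'YOUR_FOLDER_ID'; // placeholder for your shared drive / folder ID
  var resource = {
    name: 'JNJ Defects Last Shift 22 Jun 20',
    mimeType: MimeType.GOOGLE_SHEETS, // target type triggers the conversion in v3
    parents: [folderId]
  };
  var created = Drive.Files.create(resource, blob, {supportsAllDrives: true});
  Logger.log(created.id);
}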
Here is my code snippet
var sendgrid = require('sendgrid')('xxxxxx', 'xxxxxx');
var email = new sendgrid.Email();
email.addTo('xyz@gmail.com');
email.setFrom('xyz@gmail.com');
email.setSubject('welcome to send grid');
email.setHtml('<html><body>HELLO evryone ...,</body></html>');
sendgrid.send(email, function(err, json) {
if(!err)
{
console.log("mail sent successssss");
res.send({"status":0,"msg":"failure","result":"Mail sent successfully"});
}
else
{
console.log("error while sending mail")
res.send({"status":1,"msg":"failure","result":"Error while sending mail."});
}
});
I installed sendgrid through npm as well. I am getting a "TypeError: object is not a function" error. May I know why?
Version:
sendgrid@3.0.8 node_modules\sendgrid
└── sendgrid-rest@2.2.1
It looks like you're using sendgrid@3.0.8 but trying to call the sendgrid@2.* API.
v2 implementation: https://sendgrid.com/docs/Integrate/Code_Examples/v2_Mail/nodejs.html
v3 implementation:
https://sendgrid.com/docs/Integrate/Code_Examples/v3_Mail/nodejs.html
Give the v3 a go.
As for the type error, in v2:
var sendgrid = require("sendgrid")("SENDGRID_APIKEY");
you're invoking a function. However, you have v3 installed:
require('sendgrid').SendGrid(process.env.SENDGRID_API_KEY)
and it's now an object.
REQUESTED UPDATE:
I don't know too much about the keys given, but since they have tons of different supported libraries, it's completely possible that some of them use both while others use only one. If you really only have a USER_API_KEY and PASSWORD_API_KEY, just use the USER_API_KEY.
Here is their source for the Node.js implementation of the SendGrid module:
function SendGrid (apiKey, host, globalHeaders) {
var Client = require('sendgrid-rest').Client
var globalRequest = JSON.parse(JSON.stringify(require('sendgrid-rest').emptyRequest));
globalRequest.host = host || "api.sendgrid.com";
globalRequest.headers['Authorization'] = 'Bearer '.concat(apiKey)
globalRequest.headers['User-Agent'] = 'sendgrid/' + package_json.version + ';nodejs'
globalRequest.headers['Accept'] = 'application/json'
if (globalHeaders) {
for (var obj in globalHeaders) {
for (var key in globalHeaders[obj] ) {
globalRequest.headers[key] = globalHeaders[obj][key]
}
}
}
// ... remainder of the function omitted in this excerpt
}
The apiKey is attached to the header as an auth, and it looks like that's all you need.
Try following their install steps, without your own implementation,
1) (OPTIONAL) Update the development environment with your SENDGRID_API_KEY, for example:
echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env
echo "sendgrid.env" >> .gitignore
source ./sendgrid.env
========
2) Make the following, and if you did the above, use process.env.SENDGRID_API_KEY; otherwise put your USER_API_KEY as is:
var helper = require('sendgrid').mail
from_email = new helper.Email("test@example.com")
to_email = new helper.Email("test@example.com")
subject = "Hello World from the SendGrid Node.js Library!"
content = new helper.Content("text/plain", "Hello, Email!")
mail = new helper.Mail(from_email, subject, to_email, content)
//process.env.SENDGRID_API_KEY if above is done
//else just use USER_API_KEY as is
var sg = require('sendgrid').SendGrid(process.env.SENDGRID_API_KEY)
var requestBody = mail.toJSON()
var request = sg.emptyRequest()
request.method = 'POST'
request.path = '/v3/mail/send'
request.body = requestBody
sg.API(request, function (response) {
console.log(response.statusCode)
console.log(response.body)
console.log(response.headers)
})
I'm using the Users.messages: modify method to apply labels to emails; however, I must refresh the page before the labels I apply programmatically appear in the Gmail user interface.
The desired behavior is akin to manually selecting a Gmail message and applying a label from the dropdown label applicator at the top of the Gmail screen: the label is applied asynchronously. Is this possible to do programmatically?
Code
var applyLabel = function (gapiRequestURL, labelIdsArr)
{
$.ajax({
url: gapiRequestURL,
method: "POST",
contentType: "application/json",
data: JSON.stringify({
addLabelIds: labelIdsArr
}),
success: function(msg){
// alert(JSON.stringify(msg));
},
error: function(msg){
alert("Error:" + JSON.stringify(msg));
}
})
}
var decideWhichLabelToApply = function(messageContentsArr){
var testLabelOne = "Label_12"
var testLabelTwo = "Label_13"
var labelIdsArr = []
for(var i=0; i < messageContentsArr.length; i++){
var currentMessage = messageContentsArr[i]
var messageID = currentMessage.id
if (true){
var labelModifyURL = "https://www.googleapis.com/gmail/v1/users/me/messages/" + messageID + "/modify?access_token=" + thisToken
labelIdsArr.push(testLabelOne)
applyLabel(labelModifyURL, labelIdsArr)
}
else {
var labelModifyURL = "https://www.googleapis.com/gmail/v1/users/me/messages/" + messageID + "/modify?access_token=" + thisToken
labelIdsArr.push(testLabelTwo)
applyLabel(labelModifyURL, labelIdsArr)
}
}
}
Not that I know of. The Gmail web interface does some lazy caching and doesn't notice changes to the underlying data (i.e. from Inbox, IMAP, the API, etc.) particularly well. I believe it doesn't require a full browser (F5) refresh, but one certainly needs to take some UI action, like clicking on a label or hitting the in-page refresh icon, for the update to show up.