Search for all children items in a parent folder (Google Drive API search)

Since the Children resource was deprecated in v3, I'm trying to find another way to search inside a parent folder (using the parent ID) and get all children, including those inside subfolders.
I didn't find a way to do this in the Drive docs.
Please advise.

The following works for me:
function getChildFolders(folderId) {
  var q = "mimeType = 'application/vnd.google-apps.folder' and '" + folderId + "' in parents";
  var children = Drive.Files.list({q: q});
  var res = [];
  for (var i = 0; i < children.items.length; i++) {
    res.push(children.items[i].title);
    res = res.concat(getChildFolders(children.items[i].id));
  }
  return res;
}

Logger.log(getChildFolders(rootId));
But getting all (grand)child folders in a single request does not seem to be possible.
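One caveat about the snippet above: Drive.Files.list returns results a page at a time, so folders with many children can be silently truncated. Below is a minimal sketch of the same recursion with pageToken handling, assuming the same v2-style advanced Drive service (the items, title and nextPageToken fields come from that service):

function getChildFoldersPaged(folderId) {
  var q = "mimeType = 'application/vnd.google-apps.folder' and '" + folderId + "' in parents";
  var res = [];
  var pageToken = null;
  do {
    // Request the next page of matching folders
    var page = Drive.Files.list({q: q, pageToken: pageToken, maxResults: 100});
    (page.items || []).forEach(function(item) {
      res.push(item.title);
      res = res.concat(getChildFoldersPaged(item.id));
    });
    pageToken = page.nextPageToken;
  } while (pageToken);
  return res;
}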

Unfortunately, the Drive API doesn't have a search filter like "return all children of FOLDER_NAME". The only available search queries are listed in Search for Files.
But there is a somewhat similar filter you can use, this time with the parent folder's ID, and it works for subfolders as well.
So, for example, say you have this URL of the parent folder that contains your files.
https://drive.google.com/drive/folders/123456789
Use the search query
'123456789' in parents
and it will return all the files inside that folder.
Give the Files.list Try-it a test.
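Since the question mentions v3, here is a rough sketch of the same query using the Node.js googleapis client; the folder ID and the auth object are placeholders, and note that v3 returns files/nextPageToken rather than v2's items:

const {google} = require('googleapis');

async function listChildren(auth, folderId) {
  const drive = google.drive({version: 'v3', auth});
  const files = [];
  let pageToken;
  do {
    // Ask only for the fields we need; results come back a page at a time
    const res = await drive.files.list({
      q: `'${folderId}' in parents and trashed = false`,
      fields: 'nextPageToken, files(id, name, mimeType)',
      pageToken,
    });
    files.push(...res.data.files);
    pageToken = res.data.nextPageToken;
  } while (pageToken);
  return files;
}

Only direct children are returned, so recursing into subfolders still has to be done by repeating the query with each subfolder's ID, as in the first answer.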

Perhaps you could use these two functions:
/**
 * Returns an array with the folder and file names directly inside a Drive folder
 */
function listCurFoldersEFilesName(folderId) {
  var data = [];
  var j = 0, i = 0;
  var folders = DriveApp.getFolderById(folderId).getFolders(); // returns a FolderIterator
  while (folders.hasNext()) {
    var folder = folders.next();
    var folderName = folder.getName();
    data[j] = folderName.trim();
    j++;
  }
  var files = DriveApp.getFolderById(folderId).getFiles(); // returns a FileIterator
  while (files.hasNext()) {
    var file = files.next();
    var fileName = file.getName();
    data[j + i] = fileName.trim();
    i++;
  }
  return data;
}
/**
 * Returns an array with the folder and file IDs directly inside a Drive folder
 */
function listCurFoldersEFilesIDs(folderId) {
  var data = [];
  var j = 0, i = 0;
  var folders = DriveApp.getFolderById(folderId).getFolders(); // returns a FolderIterator
  while (folders.hasNext()) {
    var folder = folders.next();
    var infolderId = folder.getId();
    data[j] = infolderId;
    j++;
  }
  var files = DriveApp.getFolderById(folderId).getFiles(); // returns a FileIterator
  while (files.hasNext()) {
    var file = files.next();
    var infileId = file.getId();
    data[j + i] = infileId;
    i++;
  }
  return data;
}
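For example, pointing them at a folder ID (the ID below is just a placeholder) and logging the results:

var folderId = '123456789'; // placeholder folder ID
Logger.log(listCurFoldersEFilesName(folderId)); // names of the immediate children
Logger.log(listCurFoldersEFilesIDs(folderId));  // IDs of the immediate children

Note that both functions only look one level deep; to walk subfolders you would still have to recurse, as in the first answer above.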

Related

How to get the most recent image in a folder?

How can I get the most recent image that was added to a folder and save that image's file path to a variable? I have checked here on Stack Overflow but don't see a post that specifically answers my question. This is what I have so far; it lists out all the files, but I am unsure how to sort them by most recently modified. This is a unique and specific question. I don't mind whether this code is used or not, as long as the result is code that can get the most recent file in a folder.
Code:
(async () => {
  var lastdownloadedimage = "";
  var pathtocheck = "C:/Users/user1/Downloads";
  var pathtocheckimage = "C:/Users/user1/Downloads/ot.png";
  const testFolder = pathtocheck;
  const fs = require('fs');
  var lastdownloadedimage;
  var filescount = 0;
  var filename = [];
  var filedates = [];
  var filessortedbytimefromcurrentdateaccending = [];
  var files;
  // create a tuple for the file date and name
  var filedata = [];
  //fs.readdirSync(testFolder).forEach( filescount++);
  files = fs.readdirSync(testFolder);
  filescount = files.length;
  console.log(files[0]);
  filedates = fs.statSync(pathtocheckimage).mtime.getTime();
  filename = fs.readdirSync(testFolder);
  console.log(filescount);
  for (var currentfiletocheck = 0; currentfiletocheck < filescount; currentfiletocheck++) {
    // get the current date
    // find the file that is closest to the current date
    // use the index of that file date to find the file name
    // save the file's name to a variable
    //filename[currentfiletocheck] = fs.readdirSync(testFolder)[currentfiletocheck];
    //filedates[currentfiletocheck] = fs.stats.mtime.getTime()[currentfiletocheck];
    //filedata[currentfiletocheck][0] = filename[currentfiletocheck];
    //filedata[currentfiletocheck][1] = filedate[currentfiletocheck];
    //console.log(files[currentfiletocheck]);
  }
  filessortedbytimefromcurrentdateaccending
  filedata.sort(function(a, b) {
    return a < b ? -1 : (a > b ? 1 : 0);
  });
  for (var i = 0; i < filedata.length; i++) {
    var filenamessortedbytimefromcurrentdateaccending = filedata[i][0];
    var filedatesortedbytimefromcurrentdateaccending = filedata[i][1];
    lastdownloadedimage = filedatesortedbytimefromcurrentdateaccending;
    // do something with key and value
  }
  console.log(lastdownloadedimage);
})();
I have taken a different approach: rather than gather the timestamps and sort them, I iterate over the files and compare each file's timestamp with the latest seen so far, keeping the later timestamp and its file path each time.
Note that this will also pick up directories, so you will have to implement a filter if you want to ignore them (see the sketch after the code).
let fs = require('fs')

let dirToCheck = '.'
let files = fs.readdirSync(dirToCheck)

let latestPath = `${dirToCheck}/${files[0]}`
let latestTimeStamp = fs.statSync(latestPath).mtime.getTime()

files.forEach(file => {
  let path = `${dirToCheck}/${file}`
  let timeStamp = fs.statSync(path).mtime.getTime()
  if (timeStamp > latestTimeStamp) {
    latestTimeStamp = timeStamp
    latestPath = path
  }
});

console.log(latestPath)
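As a sketch of the filter mentioned above, restricting the listing to regular files (with an optional, purely illustrative extension check so only images are considered):

let fs = require('fs')
let path = require('path')

let dirToCheck = '.'
let imageExtensions = ['.png', '.jpg', '.jpeg', '.gif'] // example list, adjust as needed

// Keep only regular files whose extension looks like an image
let files = fs.readdirSync(dirToCheck).filter(file => {
  let fullPath = `${dirToCheck}/${file}`
  return fs.statSync(fullPath).isFile() &&
         imageExtensions.includes(path.extname(file).toLowerCase())
})

The comparison loop from the answer above can then run over this filtered list unchanged.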

Is there any Google API which could save Gmail message attachments to Google Drive?

How can I save Gmail message attachments to Google Drive? Is there any such Google API?
To add details, here is the main page listing the Google APIs you can use. As Gaurav mentioned, you can use the Drive and Gmail APIs. Here is a snippet using Apps Script:
// GLOBALS
// Array of file extensions which you would like to extract to Drive
var fileTypesToExtract = ['jpg', 'tif', 'png', 'gif', 'bmp', 'svg'];
// Name of the folder in Google Drive in which files will be put
var folderName = 'GmailToDrive';
// Name of the label which will be applied after processing the mail message
var labelName = 'GmailToDrive';

function GmailToDrive() {
  // build query to search emails
  var query = '';
  // filename:jpg OR filename:tif OR filename:gif OR filename:png OR filename:bmp OR filename:svg'; //'after:'+formattedDate+
  for (var i in fileTypesToExtract) {
    query += (query == '' ? ('filename:' + fileTypesToExtract[i]) : (' OR filename:' + fileTypesToExtract[i]));
  }
  query = 'in:inbox has:nouserlabels ' + query;
  var threads = GmailApp.search(query);
  var label = getGmailLabel_(labelName);
  var parentFolder;
  if (threads.length > 0) {
    parentFolder = getFolder_(folderName);
  }
  for (var i in threads) {
    var mesgs = threads[i].getMessages();
    for (var j in mesgs) {
      // get attachments
      var attachments = mesgs[j].getAttachments();
      for (var k in attachments) {
        var attachment = attachments[k];
        var isImageType = checkIfImage_(attachment);
        if (!isImageType) continue;
        var attachmentBlob = attachment.copyBlob();
        // The original answer used DocsList, which has since been retired;
        // DriveApp's Folder.createFile() is the equivalent call.
        parentFolder.createFile(attachmentBlob);
      }
    }
    threads[i].addLabel(label);
  }
}

// This function will get (or create) the parent folder in Google Drive
function getFolder_(folderName) {
  // DocsList.getFolder/createFolder have been retired; DriveApp is the replacement.
  var folders = DriveApp.getFoldersByName(folderName);
  return folders.hasNext() ? folders.next() : DriveApp.createFolder(folderName);
}

// getDate n days back
// n must be an integer
function getDateNDaysBack_(n) {
  n = parseInt(n);
  var today = new Date();
  var dateNDaysBack = new Date(today.valueOf() - n * 24 * 60 * 60 * 1000);
  return dateNDaysBack;
}

function getGmailLabel_(name) {
  var label = GmailApp.getUserLabelByName(name);
  if (label == null) {
    label = GmailApp.createLabel(name);
  }
  return label;
}

// This function checks the file extension
// and returns a boolean
function checkIfImage_(attachment) {
  var fileName = attachment.getName();
  var temp = fileName.split('.');
  var fileExtension = temp[temp.length - 1].toLowerCase();
  if (fileTypesToExtract.indexOf(fileExtension) != -1) return true;
  else return false;
}
Hope this helps.
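If you want this to run on its own, a time-driven trigger can be added from Apps Script as well; a minimal sketch (the 10-minute interval is only an example):

function createGmailToDriveTrigger() {
  // Run GmailToDrive() every 10 minutes via a time-driven trigger
  ScriptApp.newTrigger('GmailToDrive')
    .timeBased()
    .everyMinutes(10)
    .create();
}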

How to get over 1000 records from a SuiteScript Saved Search?

Below is code I came up with to run a Saved Search in NetSuite using SuiteScript, create a CSV with the Saved Search results, and then email the CSV. The trouble is, the results are limited to 1000 records. I've researched this issue and it appears the solution is to run a loop that slices the results in increments of 1000. A sample of what I believe is used to slice searches is also below.
However, I cannot seem to incorporate the slicing into my code. Can anyone help me combine the slicing code with my original search code?
var search = nlapiSearchRecord('item', 'customsearch219729');

// Creating some arrays that will be populated from the saved search results
var content = new Array();
var cells = new Array();
var temp = new Array();
var x = 0;

// Looping through the search results
for (var i = 0; i < search.length; i++) {
  var resultSet = search[i];
  // Returns an array of column internal IDs
  var columns = resultSet.getAllColumns();
  // Looping through each column and assigning it to the temp array
  for (var y = 0; y <= columns.length; y++) {
    temp[y] = resultSet.getValue(columns[y]);
  }
  // Taking the content of the temp array and assigning it to the content array.
  content[x] += temp;
  // Incrementing the index of the content array
  x++;
}

// Inserting headers
content.splice(0, 0, "sku,qty,");

// Creating a string variable that will be used as the CSV content
var contents;

// Looping through the content array and assigning it to the contents string variable.
for (var z = 0; z < content.length; z++) {
  contents += content[z].replace('undefined', '') + '\n';
}

// Creating a csv file and passing the contents string variable.
var file = nlapiCreateFile('InventoryUpdate.csv', 'CSV', contents.replace('undefined', ''));

// Emailing the script.
function SendSSEmail() {
  nlapiSendEmail(768, 5, 'Inventory Update', 'Sending saved search via scheduled script', 'cc#email.com', null, null, file, true, null, 'cc#email.com');
}
The following code is an example of what I found that is used to return more than 1000 records. Again, as a novice, I can't seem to incorporate the slicing into my original, functioning SuiteScript. Any help is, of course, greatly appreciated.
var filters = [...];
var columns = [...];
var results = [];

var savedsearch = nlapiCreateSearch('customrecord_mybigfatlist', filters, columns);
var resultset = savedsearch.runSearch();
var searchid = 0;
do {
  var resultslice = resultset.getResults(searchid, searchid + 1000);
  for (var rs in resultslice) {
    results.push(resultslice[rs]);
    searchid++;
  }
} while (resultslice.length >= 1000);

return results;
Try this one:
function returnCSVFile() {
  function escapeCSV(val) {
    if (!val) return '';
    if (!(/[",\s]/).test(val)) return val;
    val = val.replace(/"/g, '""');
    return '"' + val + '"';
  }

  function makeHeader(firstLine) {
    var cols = firstLine.getAllColumns();
    var hdr = [];
    cols.forEach(function(c) {
      var lbl = c.getLabel(); // column must have a custom label to be included
      if (lbl) {
        hdr.push(escapeCSV(lbl));
      }
    });
    return hdr.join(",");
  }

  function makeLine(srchRow) {
    var cols = srchRow.getAllColumns();
    var line = [];
    cols.forEach(function(c) {
      if (c.getLabel()) {
        line.push(escapeCSV(srchRow.getText(c) || srchRow.getValue(c)));
      }
    });
    return line.join(",");
  }

  function getDLFileName(prefix) {
    function pad(v) { if (v >= 10) return v; return "0" + v; }
    var now = new Date();
    return prefix + '-' + now.getFullYear() + pad(now.getMonth() + 1) + pad(now.getDate()) + pad(now.getHours()) + pad(now.getMinutes()) + ".csv";
  }

  var srchRows = getItems('item', 'customsearch219729'); // function that returns your saved search results
  if (!srchRows) throw nlapiCreateError("SRCH_RESULT", "No results from search");

  var fileLines = [makeHeader(srchRows[0])];
  srchRows.forEach(function(soLine) {
    fileLines.push(makeLine(soLine));
  });

  var file = nlapiCreateFile('InventoryUpdate.csv', 'CSV', fileLines.join('\r\n'));
  nlapiSendEmail(768, 5, 'Test csv Mail', 'csv', null, null, null, file);
}

function getItems(recordType, searchId) {
  var savedSearch = nlapiLoadSearch(recordType, searchId);
  var resultset = savedSearch.runSearch();
  var returnSearchResults = [];
  var searchid = 0;
  do {
    var resultslice = resultset.getResults(searchid, searchid + 1000);
    for (var rs in resultslice) {
      returnSearchResults.push(resultslice[rs]);
      searchid++;
    }
  } while (resultslice.length >= 1000);
  return returnSearchResults;
}
I looked into your code, but it seems you're missing the label headers in the generated CSV file. If you are bound to use your existing code, then just replace
var search = nlapiSearchRecord('item', 'customsearch219729');
with
var search = getItems('item', 'customsearch219729');
and just use the getItems helper above to get rid of the 1000-result limit.
Cheers!
I appreciate it has been a while since this was posted and replied to, but for others looking for a more generic answer to the original question, the following code should suffice:
var search = nlapiLoadSearch('record_type', 'savedsearch_id');
var searchresults = search.runSearch();
var resultIndex = 0;
var resultStep = 1000;
var resultSet;
do {
  resultSet = searchresults.getResults(resultIndex, resultIndex + resultStep); // retrieves up to the 1000-result maximum per call
  resultIndex = resultIndex + resultStep; // increment the starting point for the next batch of records
  for (var i = 0; !!resultSet && i < resultSet.length; i++) { // loop through the search results
    // Your code goes here to work on the current resultSet (up to 1000 records per pass)
  }
} while (resultSet.length > 0)
Also worth mentioning: if your code is going to be updating fields, updating records, or creating records, you need to bear script governance in mind.
Moving your code to a scheduled script to process large volumes of records is more efficient and allows you to handle governance.
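As a rough sketch of what handling governance in a SuiteScript 1.0 scheduled script can look like (the 200-unit threshold is only illustrative, and processRecords is a hypothetical helper rather than part of the code above):

function processRecords(resultSet) {
  for (var i = 0; i < resultSet.length; i++) {
    // ... update or create records here ...

    // Check remaining governance units and yield if we are running low
    if (nlapiGetContext().getRemainingUsage() < 200) {
      nlapiSetRecoveryPoint(); // save state so the script can resume after yielding
      nlapiYieldScript();      // yield; NetSuite reschedules the script and resumes from the recovery point
    }
  }
}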
The following line:
var savedsearch = nlapiCreateSearch( 'customrecord_mybigfatlist', filters, columns );
can be adapted to your own saved search like this:
var savedsearch = nlapiLoadSearch('item', 'customsearch219729');
Hope this helps.

How can I create a new document out of a subset of another document's pages (in InDesign (CS6) using ExtendScript)?

I need to offer a feature which allows InDesign users to select a page range in an InDesign document and create a new document out of those pages. This sounds simple, but it isn't...
I have tried many different ways of doing this but they have all failed to some degree. Some methods put all pages in a single spread (which sometimes makes InDesign crash). The best I've been able to do (see code below) still has problems at the beginning and the end (see screenshots below):
The original document:
The new document:
The question: How can I create a new document out of a subset of another document's pages (in InDesign using ExtendScript) without having the problems shown in the screenshots?
Note: the behavior of the script is quite different in CS5.5 and CS6. My question concerns CS6.
The second screenshot was obtained by applying the following code to the document shown in the first screenshot:
CODE
var firstPageName = { editContents: "117" }; // This page number is actually entered by the user in an integerEditbox
var lastPageName = { editContents: "136" };  // This page number is actually entered by the user in an integerEditbox

var sourceDocument = app.activeDocument;
var destDocument = app.documents.add();

destDocument.importStyles(ImportFormat.paragraphStylesFormat, new File(sourceDocument.filePath + "/" + sourceDocument.name), GlobalClashResolutionStrategy.LOAD_ALL_WITH_OVERWRITE);
destDocument.importStyles(ImportFormat.characterStylesFormat, new File(sourceDocument.filePath + "/" + sourceDocument.name), GlobalClashResolutionStrategy.LOAD_ALL_WITH_OVERWRITE);

destDocument.viewPreferences.horizontalMeasurementUnits = sourceDocument.viewPreferences.horizontalMeasurementUnits;
destDocument.viewPreferences.verticalMeasurementUnits = sourceDocument.viewPreferences.verticalMeasurementUnits;

destDocument.documentPreferences.facingPages = sourceDocument.documentPreferences.facingPages;
destDocument.documentPreferences.pageHeight = sourceDocument.documentPreferences.pageHeight;
destDocument.documentPreferences.pageWidth = sourceDocument.documentPreferences.pageWidth;
destDocument.documentPreferences.pageSize = sourceDocument.documentPreferences.pageSize;

var sourceSpreads = sourceDocument.spreads;
var nbSourceSpreads = sourceSpreads.length;

var firstPageFound = false;
var lastPageFound = false;

var i;
var newSpreadNeeded;
var currentDestSpread;

// Use && so the loops stop once the last page has been copied
for (i = 0; !lastPageFound && i < nbSourceSpreads; ++i) {
  newSpreadNeeded = true;
  var sourcePages = sourceSpreads[i].pages;
  var nbSourcePages = sourcePages.length;
  var j;
  for (j = 0; !lastPageFound && j < nbSourcePages; ++j) {
    if (sourcePages[j].name === firstPageName.editContents) {
      firstPageFound = true;
      destDocument.documentPreferences.startPageNumber = parseInt(firstPageName.editContents); // We want to preserve page numbers
    }
    if (firstPageFound) {
      // Copy this page over to the new document.
      var firstInNewSpread = false;
      if (newSpreadNeeded) {
        currentDestSpread = destDocument.spreads.add();
        newSpreadNeeded = false;
        firstInNewSpread = true;
      }
      var newPage = sourcePages[j].duplicate(LocationOptions.AT_END, currentDestSpread);
      var k;
      for (k = 0; k < newPage.index; ++k) {
        currentDestSpread.pages[k].remove();
      }
    }
    if (sourcePages[j].name === lastPageName.editContents) {
      lastPageFound = true;
    }
  }
}
destDocument.spreads[0].remove();
I was hacking around and came up with this little script. Although it approaches the problem from the opposite direction, it seems to work fine here. Also, I'm still running InDesign CS5, but maybe it will work for you. Hopefully I got the gist of your question?
This will extract pages 3 through 5 into a separate document:
var doc = app.activeDocument;
var newFilePath = doc.filePath + "/subset_" + doc.name;
var newFile = File(newFilePath); // Create a new file path
doc.saveACopy(newFile);          // Save a copy of the doc
var newDoc = app.open(newFile);  // Open the copy

var firstPageNum = 3; // First page number in the range
var lastPageNum = 5;  // Last page number in the range

var firstPage = newDoc.pages[firstPageNum - 1];
var lastPage = newDoc.pages[lastPageNum - 1];

// Remove all text from the last page in the range to the end of the document
var lastPageFrames = lastPage.textFrames.everyItem().getElements();
for (var i = 0; i < lastPageFrames.length; i++) {
  var frame = lastPageFrames[i];
  var parentStory = frame.parentStory;
  var lastFrameInsert = frame.insertionPoints.lastItem();
  var lastStoryInsert = parentStory.insertionPoints.lastItem();
  var textAfter = parentStory.insertionPoints.itemByRange(lastFrameInsert, lastStoryInsert);
  textAfter.remove();
};

// Remove all text from the beginning of the document to the first page in the range
var firstPageFrames = firstPage.textFrames.everyItem().getElements();
for (var i = 0; i < firstPageFrames.length; i++) {
  var frame = firstPageFrames[i];
  var parentStory = frame.parentStory;
  var firstFrameInsert = frame.insertionPoints.firstItem();
  var textBefore = parentStory.insertionPoints.itemByRange(0, firstFrameInsert.index);
  textBefore.remove();
};

// Remove the pages that aren't in the range
// (allPages is 0-based while the page numbers are 1-based, hence the -1 offsets)
var allPages = newDoc.pages.everyItem().getElements();
for (var i = 0; i < allPages.length; i++) {
  var page = allPages[i];
  if (i < firstPageNum - 1 || i > lastPageNum - 1) {
    page.remove();
  }
};

I'm working on creating a static node.js server

I am working on creating a static node.js server that just serves up the plain HTML, CSS, and JavaScript in the specified directory. I am trying to get the server to read every subdirectory and route the URL to the file it specifies. However, it only reads the root directory.
var fs = require('fs');
var array = fs.readdirSync(__dirname);

function getAllSub(array) {
  for (i = 0; i < array.length; i++) {
    if (array[i].indexOf(".") == (-1)) {
      array = array.concat(array[i] + "/" + fs.readdirSync(__dirname + "/" + array[i]));
    }
    if (array[i].indexOf("/") != (-1)) {
      var foldcon = array[i].substr(array[i].indexOf("/") + 1);
      var folder = array[i].substr(0, array[i].indexOf("/"));
      foldcon = foldcon.split(",");
      for (n = 0; n < foldcon.length; n++) {
        foldcon[n] = folder + "/" + foldcon[n];
        if (foldcon[n].indexOf(".") == (-1)) {
          console.log([foldcon[n]]);
          foldcon[n] = getAllSub([foldcon[n]]);
        }
      }
      array.splice(i, 1, foldcon);
    }
  }
  return array;
}

array = getAllSub(array);
console.log(array);
Right now this code reads the directory, and it recognizes when an item in the array of files is a folder, but it doesn't add the files from the subdirectories into the array properly. At the moment it ends up in what looks like infinite recursion, and I can't figure out how to stop it.
This isn't meant to be something I will actually use; I just thought it would be a good project for introducing myself to the basics of node.js.
I know it's late, but this is the right answer for a recursive solution to reading file paths in subfolders:
var fs = require("fs");

/**
 * Recurse through a directory and populate an array with all the file paths beneath it
 * @param {string} path The path to start searching
 * @param {array} allFiles Modified to contain all the file paths
 */
function readdirSyncRecursive(path, allFiles) {
  var stats = fs.statSync(path);
  if (stats.isFile()) {
    // base case
    allFiles.push(path);
  } else if (stats.isDirectory()) {
    // induction step
    fs.readdirSync(path).forEach(function(fileName) {
      readdirSyncRecursive(path + "/" + fileName, allFiles);
    });
  }
}

var allFiles = [];
readdirSyncRecursive("/path/to/search", allFiles);
console.info(allFiles);
var fs = require('fs');
var array = fs.readdirSync(__dirname);

for (i = 0; i < array.length; i++) {
  if (array[i].indexOf(".") == (-1)) {
    // you need to use the return value from concat
    array = array.concat(array[i] + "/" + fs.readdirSync(__dirname + "/" + array[i]));
    console.log('if executed');
  }
}
console.log(array);
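Once the recursive listing works, serving the files is a matter of wiring it into Node's built-in http module. Here is a minimal sketch that assumes the readdirSyncRecursive helper from the answer above; the port, the content-type map, and the whitelist approach are only illustrative choices, not a hardened implementation:

var http = require('http');
var fs = require('fs');
var path = require('path');

// Build the list of servable files once at startup using the recursive helper above
var allFiles = [];
readdirSyncRecursive(__dirname, allFiles);

// Example content-type map; extend as needed
var contentTypes = { '.html': 'text/html', '.css': 'text/css', '.js': 'application/javascript' };

http.createServer(function (req, res) {
  var requested = __dirname + req.url; // matches the "/"-joined paths produced by the helper
  if (allFiles.indexOf(requested) === -1) { // only serve files discovered at startup
    res.writeHead(404);
    res.end('Not found');
    return;
  }
  res.writeHead(200, { 'Content-Type': contentTypes[path.extname(requested)] || 'application/octet-stream' });
  fs.createReadStream(requested).pipe(res);
}).listen(8080);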
