How to control the output size when exporting images from an InDesign Document? - extendscript

I have a script that loops through the document's images and writes them to disk, but I would like to know how to control the output size of the images. Right now images seem to be exported at the same size as their source, except that smaller images get upscaled.
var doc = app.activeDocument;
var images = doc.allGraphics;
var documentPath = doc.filePath;
app.jpegExportPreferences.jpegColorSpace = JpegColorSpaceEnum.RGB;
app.jpegExportPreferences.jpegQuality = JPEGOptionsQuality.MAXIMUM;
app.jpegExportPreferences.exportResolution = 300;
for(var i=0; i < images.length; i++){
var filename = documentPath + "/image_" + (i+1) + ".jpg";
var imageFile = new File(filename);
images[i].exportFile(ExportFormat.JPG, imageFile, false);
}
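As far as I know, exportFile itself has no size parameter, but exportResolution effectively controls the pixel output: the exported pixel width is roughly the frame's physical width in inches times the export resolution. Below is a minimal sketch of that idea, replacing the loop above; targetWidthPx is a made-up parameter, and it assumes the document's measurement units are points (72 pt per inch) and that each graphic's parent is its containing frame:
// Sketch: derive a per-image export resolution so each JPEG comes out
// at roughly targetWidthPx pixels wide (targetWidthPx is hypothetical).
var targetWidthPx = 1024;
for (var i = 0; i < images.length; i++) {
    // geometricBounds is [y1, x1, y2, x2] in the document's units;
    // this assumes those units are points (72 pt = 1 inch)
    var gb = images[i].parent.geometricBounds;
    var widthInches = (gb[3] - gb[1]) / 72;
    // exported pixels ~ physical width (inches) * resolution (ppi)
    app.jpegExportPreferences.exportResolution =
        Math.max(1, Math.round(targetWidthPx / widthInches));
    var imageFile = new File(documentPath + "/image_" + (i + 1) + ".jpg");
    images[i].exportFile(ExportFormat.JPG, imageFile, false);
}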

Related

ExtendScript: how to save an ArtLayer as a PNG

How can I save an ArtLayer as a PNG file? Are there any Action APIs I can use?
var layer = activeDocument.layers[2];
layer.copy();
var myDoc = app.documents.add(2000, 1000, 72, 'export-demo');
myDoc.paste();
var type = new PNGSaveOptions();
var fileSpec = new File("e:/ps/" + layer.name + ".png");
myDoc.saveAs(fileSpec, type);
Please note: I don't have the rep to comment and ask for more details, so I figured I would just post the answer that I know.
There are a few ways to accomplish this, but the easiest one I've found is to turn off the visibility of all other layers, then export a PNG. That would look something like this:
#target photoshop
var activeDoc = app.activeDocument;
var layers = activeDoc.layers;
var focusLayer = layers[ 2 ];
var visibleLayers = [];
for ( var i = 0; i < layers.length; i++ ) {
// Add visible layers to array so they can be turned back on after export
if ( layers[ i ].visible ) visibleLayers.push( layers[ i ] );
// Turn off visibility of all layers except the focus layer
if ( layers[ i ] !== focusLayer ) layers[ i ].visible = false;
}
// Export PNG (change options to your liking)
var filepath = "e:/ps/" + layer.name + ".png";
var file = File( filepath );
var exportOptions = new ExportOptionsSaveForWeb();
exportOptions.format = SaveDocumentType.PNG;
exportOptions.PNG8 = false; // false = PNG-24
exportOptions.transparency = true; // true = transparent
exportOptions.interlaced = false; // true = interlacing on
exportOptions.includeProfile = false; // false = don't embed ICC profile
activeDoc.exportDocument(file, ExportType.SAVEFORWEB, exportOptions);
// Turn visible layers back on
for ( var i = 0; i < visibleLayers.length; i++ ) {
visibleLayers[ i ].visible = true;
}
The downside to this method is that it only exports at the size of the PSD and does not crop to the layer's image dimensions.
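If the crop matters, one workaround (only a sketch, and it assumes the focus layer sits on a transparent canvas once the other layers are hidden) is to trim the transparent pixels just before the exportDocument call above, then roll the document back through its history state:
// Sketch: crop the canvas to the focus layer's pixels, export, then undo.
var savedState = activeDoc.activeHistoryState;   // remember the current state
activeDoc.trim(TrimType.TRANSPARENT);            // trim transparent border pixels
activeDoc.exportDocument(file, ExportType.SAVEFORWEB, exportOptions);
activeDoc.activeHistoryState = savedState;       // restore the original canvas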

web console - grab all links - HELP needed

I found this useful code:
var x = document.querySelectorAll("a");
var myarray = [];
for (var i=0; i<x.length; i++){
var nametext = x[i].textContent;
var cleantext = nametext.replace(/\s+/g, ' ').trim();
var cleanlink = x[i].href;
myarray.push([cleantext,cleanlink]);
};
function make_table() {
var table = '<table><thead><tr><th>Name</th><th>Links</th></tr></thead><tbody>';
for (var i=0; i<myarray.length; i++) {
table += '<tr><td>'+ myarray[i][0] + '</td><td>'+myarray[i][1]+'</td></tr>';
}
table += '</tbody></table>';
var w = window.open("");
w.document.write(table);
}
make_table()
When you run this in the web browser console, it opens up a new webpage listing all the links with their names.
However, some of the names are shortened.
Is there a way to edit it so it shows the entire text name for each link?
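Hard to say without seeing the page, but if the site truncates the anchor text in the markup itself, the full name often survives in the title or aria-label attribute. A hedged tweak is to replace the nametext line with a fallback chain:
// Guess: prefer title/aria-label (often untruncated) over the visible text
var nametext = x[i].getAttribute("title") ||
               x[i].getAttribute("aria-label") ||
               x[i].textContent;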

download and split large file into 100 MB chunks in blob storage

I have a 2 GB file in blob storage and am building a console application that will download this file to the desktop. The requirement is to split it into 100 MB chunks and append a number to each filename. I do not need to re-combine those files again; all I need is the chunks.
I currently have this code from Azure download blob part
But I cannot figure out how to stop downloading once the file reaches 100 MB and start a new one.
Any help will be appreciated.
Update: Here is my code
CloudStorageAccount account = CloudStorageAccount.Parse(connectionString);
var blobClient = account.CreateCloudBlobClient();
var container = blobClient.GetContainerReference(containerName);
var file = uri;
var blob = container.GetBlockBlobReference(file);
//First fetch the size of the blob. We use this to create an empty file with size = blob's size
blob.FetchAttributes();
var blobSize = blob.Properties.Length;
long blockSize = (1 * 1024 * 1024);//1 MB chunk;
blockSize = Math.Min(blobSize, blockSize);
//Create an empty file of blob size
using (FileStream fs = new FileStream(file, FileMode.Create))//Create empty file.
{
fs.SetLength(blobSize);//Set its size
}
var blobRequestOptions = new BlobRequestOptions
{
RetryPolicy = new ExponentialRetry(TimeSpan.FromSeconds(5), 3),
MaximumExecutionTime = TimeSpan.FromMinutes(60),
ServerTimeout = TimeSpan.FromMinutes(60)
};
long startPosition = 0;
long currentPointer = 0;
long bytesRemaining = blobSize;
do
{
var bytesToFetch = Math.Min(blockSize, bytesRemaining);
using (MemoryStream ms = new MemoryStream())
{
//Download range (by default 1 MB)
blob.DownloadRangeToStream(ms, currentPointer, bytesToFetch, null, blobRequestOptions);
ms.Position = 0;
var contents = ms.ToArray();
using (var fs = new FileStream(file, FileMode.Open))//Open that file
{
fs.Position = currentPointer;//Move to the current write offset
fs.Write(contents, 0, contents.Length);//Write the downloaded range at that offset
}
startPosition += blockSize;
currentPointer += contents.Length;//Update pointer
bytesRemaining -= contents.Length;//Update bytes to fetch
Console.WriteLine(fileName + dateTimeStamp + ".csv " + (startPosition / 1024 / 1024) + "/" + (blob.Properties.Length / 1024 / 1024) + " MB downloaded...");
}
}
while (bytesRemaining > 0);
As I understand it, you can break your blob file into the expected pieces (100 MB), then leverage CloudBlockBlob.DownloadRangeToStream to download each chunk to its own file. Here is my code snippet; you can refer to it:
ParallelDownloadBlob
private static void ParallelDownloadBlob(Stream outPutStream, CloudBlockBlob blob,long startRange,long endRange)
{
blob.FetchAttributes();
int bufferLength = 1 * 1024 * 1024;//1 MB chunk for download
long blobRemainingLength = endRange-startRange;
Queue<KeyValuePair<long, long>> queues = new Queue<KeyValuePair<long, long>>();
long offset = startRange;
while (blobRemainingLength > 0)
{
long chunkLength = (long)Math.Min(bufferLength, blobRemainingLength);
queues.Enqueue(new KeyValuePair<long, long>(offset, chunkLength));
offset += chunkLength;
blobRemainingLength -= chunkLength;
}
Parallel.ForEach(queues,
new ParallelOptions()
{
MaxDegreeOfParallelism = 5
}, (queue) =>
{
using (var ms = new MemoryStream())
{
blob.DownloadRangeToStream(ms, queue.Key, queue.Value);
lock (outPutStream)
{
outPutStream.Position = queue.Key- startRange;
var bytes = ms.ToArray();
outPutStream.Write(bytes, 0, bytes.Length);
}
}
});
}
Program Main
var container = storageAccount.CreateCloudBlobClient().GetContainerReference(defaultContainerName);
var blob = container.GetBlockBlobReference("code.txt");
blob.FetchAttributes();
long blobTotalLength = blob.Properties.Length;
long chunkLength = 10 * 1024; //split the blob into files of 10 KB each
for (long i = 0; i <= blobTotalLength; i += chunkLength)
{
long startRange = i;
long endRange = (i + chunkLength) > blobTotalLength ? blobTotalLength : (i + chunkLength);
using (var fs = new FileStream(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, $"resources\\code_[{startRange}]_[{endRange}].txt"), FileMode.Create))
{
Console.WriteLine($"\nParallelDownloadBlob from range [{startRange}] to [{endRange}] start...");
Stopwatch sp = new Stopwatch();
sp.Start();
ParallelDownloadBlob(fs, blob, startRange, endRange);
sp.Stop();
Console.WriteLine($"download done, time cost:{sp.ElapsedMilliseconds / 1000.0}s");
}
}
UPDATE:
Based on your requirement, I recommend downloading the blob into a single file, then leveraging LumenWorks.Framework.IO to read the large file line by line, checking the number of bytes you have read and saving to a new CSV file once the current one reaches 100 MB. Here is a code snippet; you can refer to it:
using (CsvReader csv = new CsvReader(new StreamReader("data.csv"), true))
{
int fieldCount = csv.FieldCount;
string[] headers = csv.GetFieldHeaders();
while (csv.ReadNextRecord())
{
for (int i = 0; i < fieldCount; i++)
Console.Write(string.Format("{0} = {1};",
headers[i],
csv[i] == null ? "MISSING" : csv[i]));
//TODO:
//1.Read the current record, check the total bytes you have read;
//2.Create a new csv file if the current total bytes up to 100MB, then save the current record to the current CSV file.
}
}
Additionally, you could refer to A Fast CSV Reader and CsvHelper for more details.
UPDATE 2:
Here is a code sample for breaking a large CSV file into smaller CSV files of a fixed byte size. I used CsvHelper 2.16.3 for the following snippet; you can refer to it:
string[] headers = new string[0];
using (var sr = new StreamReader(@"C:\Users\v-brucch\Desktop\BlobHourMetrics.csv")) //83.9KB
{
using (CsvHelper.CsvReader csvReader = new CsvHelper.CsvReader(sr,
new CsvHelper.Configuration.CsvConfiguration()
{
Delimiter = ",",
Encoding = Encoding.UTF8
}))
{
//check header
if (csvReader.ReadHeader())
{
headers = csvReader.FieldHeaders;
}
TextWriter writer = null;
CsvWriter csvWriter = null;
long readBytesCount = 0;
long chunkSize = 30 * 1024; //split the CSV into files of up to 30 KB each
while (csvReader.Read())
{
var curRecord = csvReader.CurrentRecord;
var curRecordByteCount = curRecord.Sum(r => Encoding.UTF8.GetByteCount(r)) + headers.Count() + 1;
readBytesCount += curRecordByteCount;
//check bytes you have read
if (writer == null || readBytesCount > chunkSize)
{
readBytesCount = curRecordByteCount + headers.Sum(h => Encoding.UTF8.GetByteCount(h)) + headers.Count() + 1;
if (writer != null)
{
writer.Flush();
writer.Close();
}
string fileName = $"BlobHourMetrics_{Guid.NewGuid()}.csv";
writer = new StreamWriter(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, fileName), true);
csvWriter = new CsvWriter(writer);
csvWriter.Configuration.Encoding = Encoding.UTF8;
//output header field
foreach (var header in headers)
{
csvWriter.WriteField(header);
}
csvWriter.NextRecord();
}
//output record field
foreach (var field in curRecord)
{
csvWriter.WriteField(field);
}
csvWriter.NextRecord();
}
if (writer != null)
{
writer.Flush();
writer.Close();
}
}
}

Azure server not letting me use a NuGet package

I have a website hosted on Azure that includes a Web API, which I'm using to develop an Android app. I'm trying to upload a media file to the server, where it's encoded by a media encoder and saved to a path. The encoder library is called "Media Toolkit", which I found here: https://www.nuget.org/packages/MediaToolkit/1.0.0.3
My server side code looks like this:
[HttpPost]
[Route("upload")]
public async Task<HttpResponseMessage> Upload(uploadFileModel model)
{
var result = new HttpResponseMessage(HttpStatusCode.OK);
if (ModelState.IsValid)
{
string thumbname = "";
string resizedthumbname = Guid.NewGuid() + "_yt.jpg";
string FfmpegPath = Encoding_Settings.FFMPEGPATH;
string tempFilePath = Path.Combine(HttpContext.Current.Server.MapPath("~/video"), model.fileName);
string pathToFiles = HttpContext.Current.Server.MapPath("~/video");
string pathToThumbs = HttpContext.Current.Server.MapPath("~/contents/member/" + model.username + "/thumbs");
string finalPath = HttpContext.Current.Server.MapPath("~/contents/member/" + model.username + "/flv");
string resizedthumb = Path.Combine(pathToThumbs, resizedthumbname);
var outputPathVid = new MediaFile { Filename = Path.Combine(finalPath, model.fileName) };
var inputPathVid = new MediaFile { Filename = Path.Combine(pathToFiles, model.fileName) };
int maxWidth = 380;
int maxHeight = 360;
var namewithoutext = Path.GetFileNameWithoutExtension(Path.Combine(pathToFiles, model.fileName));
thumbname = model.VideoThumbName;
string oldthumbpath = Path.Combine(pathToThumbs, thumbname);
var fileName = model.fileName;
try
{
File.WriteAllBytes(tempFilePath, model.data);
}
catch (Exception e)
{
Console.WriteLine(e.Message);
}
using (var engine = new Engine())
{
engine.GetMetadata(inputPathVid);
// Saves the frame located on the 15th second of the video.
var outputPathThumb = new MediaFile { Filename = Path.Combine(pathToThumbs, thumbname + ".jpg") };
var options = new ConversionOptions { Seek = TimeSpan.FromSeconds(0), CustomHeight = 360, CustomWidth = 380 };
engine.GetThumbnail(inputPathVid, outputPathThumb, options);
}
Image image = Image.FromFile(Path.Combine(pathToThumbs, thumbname + ".jpg"));
//var ratioX = (double)maxWidth / image.Width;
//var ratioY = (double)maxHeight / image.Height;
//var ratio = Math.Min(ratioX, ratioY);
var newWidth = (int)(maxWidth);
var newHeight = (int)(maxHeight);
var newImage = new Bitmap(newWidth, newHeight);
Graphics.FromImage(newImage).DrawImage(image, 0, 0, newWidth, newHeight);
Bitmap bmp = new Bitmap(newImage);
bmp.Save(Path.Combine(pathToThumbs, thumbname + "_resized.jpg"));
//File.Delete(Path.Combine(pathToThumbs, thumbname));
using (var engine = new Engine())
{
var conversionOptions = new ConversionOptions
{
VideoSize = VideoSize.Hd720,
AudioSampleRate = AudioSampleRate.Hz44100,
VideoAspectRatio = VideoAspectRatio.Default
};
engine.GetMetadata(inputPathVid);
engine.Convert(inputPathVid, outputPathVid, conversionOptions);
}
File.Delete(tempFilePath);
Video_Struct vd = new Video_Struct();
vd.CategoryID = 0; // store categoryname or term instead of category id
vd.Categories = "";
vd.UserName = model.username;
vd.Title = "";
vd.Description = "";
vd.Tags = "";
vd.Duration = inputPathVid.Metadata.Duration.ToString();
vd.Duration_Sec = Convert.ToInt32(inputPathVid.Metadata.Duration.Seconds.ToString());
vd.OriginalVideoFileName = model.fileName;
vd.VideoFileName = model.fileName;
vd.ThumbFileName = thumbname + "_resized.jpg";
vd.isPrivate = 0;
vd.AuthKey = "";
vd.isEnabled = 1;
vd.Response_VideoID = 0; // video responses
vd.isResponse = 0;
vd.isPublished = 1;
vd.isReviewed = 1;
vd.Thumb_Url = "none";
//vd.FLV_Url = flv_url;
vd.Embed_Script = "";
vd.isExternal = 0; // website own video, 1: embed video
vd.Type = 0;
vd.YoutubeID = "";
vd.isTagsreViewed = 1;
vd.Mode = 0; // filter videos based on website sections
//vd.ContentLength = f_contentlength;
vd.GalleryID = 0;
long videoid = VideoBLL.Process_Info(vd, false);
return result;
}
else
{
throw new HttpResponseException(Request.CreateResponse(HttpStatusCode.NotAcceptable, "This request is not properly formatted"));
}
}
When the debugger hits the line using (var engine = new Engine()), a 500 internal server error is thrown. I don't get this error when testing on my local IIS server. Since it works fine locally but not on the Azure-hosted server, I figure it has to do with the Azure service rather than an error in my code. If that is the case, how can I get around this issue? I don't want to use Azure blob storage, as it would require a lot of changes to my code. Does anyone have any idea what the issue might be?
Any helpful suggestions are appreciated.
Server.MapPath works differently on Azure WebApps - change to:
string pathToFiles = HttpContext.Current.Server.MapPath("~//video");
Also, see this SO post for another approach.

How can I create a new document out of a subset of another document's pages (in InDesign (CS6) using ExtendScript)?

I need to offer a feature which allows InDesign users to select a page range in an InDesign document and create a new document out of those pages. This sounds simple, but it isn't...
I have tried many different ways of doing this but they have all failed to some degree. Some methods put all pages in a single spread (which sometimes makes InDesign crash). The best I've been able to do (see code below) still has problems at the beginning and the end (see screenshots below):
The original document:
The new document:
The question: How can I create a new document out of a subset of another document's pages (in InDesign using ExtendScript) without having the problems shown in the screenshots?
note: The behavior of the script is quite different in CS5.5 and CS6. My question concerns CS6.
The second screenshot was obtained by applying the following code to the document shown in the first screenshot:
CODE
var firstPageName = { editContents: "117" }; // This page number is actually entered by the user in an integerEditbox
var lastPageName = { editContents: "136" }; // This page number is actually entered by the user in an integerEditbox
var sourceDocument = app.activeDocument;
var destDocument = app.documents.add();
destDocument.importStyles(ImportFormat.paragraphStylesFormat, new File(sourceDocument.filePath + "/" + sourceDocument.name), GlobalClashResolutionStrategy.LOAD_ALL_WITH_OVERWRITE);
destDocument.importStyles(ImportFormat.characterStylesFormat, new File(sourceDocument.filePath + "/" + sourceDocument.name), GlobalClashResolutionStrategy.LOAD_ALL_WITH_OVERWRITE);
destDocument.viewPreferences.horizontalMeasurementUnits = sourceDocument.viewPreferences.horizontalMeasurementUnits;
destDocument.viewPreferences.verticalMeasurementUnits = sourceDocument.viewPreferences.verticalMeasurementUnits;
destDocument.documentPreferences.facingPages = sourceDocument.documentPreferences.facingPages;
destDocument.documentPreferences.pageHeight = sourceDocument.documentPreferences.pageHeight;
destDocument.documentPreferences.pageWidth = sourceDocument.documentPreferences.pageWidth;
destDocument.documentPreferences.pageSize = sourceDocument.documentPreferences.pageSize;
var sourceSpreads = sourceDocument.spreads;
var nbSourceSpreads = sourceSpreads.length;
var firstPageFound = false;
var lastPageFound = false;
var i;
var newSpreadNeeded;
var currentDestSpread;
for (i = 0; !lastPageFound && i < nbSourceSpreads; ++i) {
newSpreadNeeded = true;
var sourcePages = sourceSpreads[i].pages;
var nbSourcePages = sourcePages.length;
var j;
for (j = 0; !lastPageFound && j < nbSourcePages; ++j) {
if (sourcePages[j].name === firstPageName.editContents) {
firstPageFound = true;
destDocument.documentPreferences.startPageNumber = parseInt(firstPageName.editContents); // We want to preserve page numbers
}
if (firstPageFound) {
// Copy this page over to the new document.
var firstInNewSpread = false;
if (newSpreadNeeded) {
currentDestSpread = destDocument.spreads.add();
newSpreadNeeded = false;
firstInNewSpread = true;
}
var newPage = sourcePages[j].duplicate(LocationOptions.AT_END, currentDestSpread);
var k;
for (k = 0; k < newPage.index; ++k) {
currentDestSpread.pages[k].remove();
}
}
if (sourcePages[j].name === lastPageName.editContents) {
lastPageFound = true;
}
}
}
destDocument.spreads[0].remove();
I was hacking around and came up with this little script. Although it approaches the problem from the opposite direction, it seems to work fine here. Also, I'm still running InDesign CS5, but maybe it will work for you. Hopefully I got the gist of your question?
This will extract pages 3 through 5 into a separate document:
var doc = app.activeDocument;
var newFilePath = doc.filePath + "/subset_" + doc.name;
var newFile = File(newFilePath); // Create a new file path
doc.saveACopy(newFile); // Save a copy of the doc
var newDoc = app.open(newFile); // Open the copy
var firstPageNum = 3; // First page number in the range
var lastPageNum = 5; // Last page number in the range
var firstPage = newDoc.pages[firstPageNum-1];
var lastPage = newDoc.pages[lastPageNum-1];
// Remove all text from the last page in the range to the end of the document
var lastPageFrames = lastPage.textFrames.everyItem().getElements();
for (var i=0; i < lastPageFrames.length; i++) {
var frame = lastPageFrames[i];
var parentStory = frame.parentStory;
var lastFrameInsert = frame.insertionPoints.lastItem();
var lastStoryInsert = parentStory.insertionPoints.lastItem();
var textAfter = parentStory.insertionPoints.itemByRange(lastFrameInsert,lastStoryInsert);
textAfter.remove();
};
// Remove all text from the beginning of the document to the first page in the range
var firstPageFrames = firstPage.textFrames.everyItem().getElements();
for (var i=0; i < firstPageFrames.length; i++) {
var frame = firstPageFrames[i];
var parentStory = frame.parentStory;
var firstFrameInsert = frame.insertionPoints.firstItem();
var textBefore = parentStory.insertionPoints.itemByRange(0,firstFrameInsert.index);
textBefore.remove();
};
// Remove the pages that aren't in the range
var allPages = newDoc.pages.everyItem().getElements();
for (var i=0; i < allPages.length; i++) {
var page = allPages[i];
if (i < firstPageNum - 1 || i > lastPageNum - 1) { // page numbers are 1-based, indices 0-based
page.remove();
}
};
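One difference from the script in the question: saveACopy keeps the default numbering, while the question's version set startPageNumber to preserve the original page numbers. If you want that here too, a one-line addition after the removal loop should do it (startPageNumber is the same documentPreferences property the question's own script used):
// Preserve the original page numbering, as the question's script did
newDoc.documentPreferences.startPageNumber = firstPageNum;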
