Bluemix Node.js FileTransferStep documentation - node.js

I am a newbie to Bluemix. I downloaded the client libraries, but I don't see API docs for JavaScript. Where do I find them? And how do I go about calling several JavaScript functions that are neither in the Node.js client libs nor documented anywhere online?

Regarding the Workload Scheduler service call: you have to edit your package.json file
to add a dependency on the iws-light module using an HTTPS link, as follows:
"dependencies": {
    "iws-light": "https://start.wa.ibmserviceengage.com/bluemix/iws-light.tgz"
}
Then open your shell, go to the root of your app, and run:
npm install
After this you can require the Workload Scheduler service in your application:
var ws = require("iws-light");
and create a connection to Bluemix:
//retrieve the service URL from Bluemix VCAP_SERVICES...
var wsConn;
if (process.env.VCAP_SERVICES) {
    wsConn = ws.createConnection();
} else {
    //...or set it on your own (if you're working locally)
    var url = "your workload scheduler url";
    wsConn = ws.createConnection(url);
}
//retrieve the cloud agent
var agentName;
wsConn.getCloudAgent(function(data) {
    agentName = data;
});
//set your timezone
wsConn.setTimezone({timezone: "Europe/Rome"}, function(err, data) {
    if (err) {
        console.log(err);
    }
});
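One caveat: getCloudAgent is asynchronous, so agentName is only populated once its callback fires. In a real app the process-creation code shown below should be kicked off from inside that callback, so agentName is guaranteed to be set when it is used. A minimal sketch:
wsConn.getCloudAgent(function(data) {
    var agentName = data;
    //build the process and its FileTransferStep here (see the code below),
    //so that agentName is guaranteed to be set when the step is created
});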
Now you're ready to use the library: create a process
and add a FileTransferStep to it:
//create a process
var process = new ws.WAProcess("ProcessName", "This process transfers a file every day from local to remote server");
//supported operations are ws.steps.FileTransferStep.OperationDownload or ws.steps.FileTransferStep.OperationUpload
var operation = ws.steps.FileTransferStep.OperationUpload;
//create the FileTransferStep
var ftStep = new ws.steps.FileTransferStep(agentName, operation);
//supported protocols are AUTO, FTP, FTPS, SSH, WINDOWS
ftStep.setProtocol(ws.steps.FileTransferStep.ProtocolAuto);
//set the local file
var local = {
    path: "local file path",
    user: "local username",
    password: "local password"
};
ftStep.setLocalFile(local.path, local.user, local.password);
//set the remote file
var remote = {
    path: "remote file path",
    user: "remote username",
    password: "remote password",
    server: "remote server"
};
ftStep.setRemoteFile(remote.server, remote.path, remote.user, remote.password);
//the binary mode flag: true if it uses FTP binary mode
var binaryMode = true;
//the passive mode flag: true if it uses FTP passive mode
var passiveMode = true;
//set the timeout
var timeout = 5;
ftStep.setMode(binaryMode, passiveMode, timeout);
//add the FileTransferStep to the process
process.addStep(ftStep);
//create a trigger
var trigger = new ws.TriggerFactory.everyDayAt(1, 7, 30);
//add the trigger to the process
process.addTrigger(trigger);
process.tasklibraryid = "your task library id";
//create and enable the process
wsConn.createAndEnableProcess(process, function(err, data) {
    if (err) {
        console.log(err);
    } else {
        console.log("process created and enabled");
    }
});
The code above creates a process with a file transfer step from Node.js code; however, I'm not sure if this is what you actually need.
If you can explain the scenario you are trying to implement, I can be more precise about the best way to implement it using the Workload Scheduler service.
Regards,
Gabriele

Related

How to copy Postman history from the Chrome app to the native app?

Since Google is ending support for Chrome apps, Postman recently deprecated their Chrome app and introduced a native app.
I am in the process of switching from the Postman Chrome app to the native app.
How do I copy the history from my Chrome app to the native app? Sync doesn't work.
There is an option to export data, but that doesn't export the history.
Any ideas?
While searching for this I came across this post, which is very helpful.
Thanks to stephan for sharing this code.
Follow these steps to copy your history from the Chrome app to the native app.
//In Chrome DevTools on the background page of the Postman extension...
//A handy helper method that lets you save data from the console to a file
(function(console){
    console.save = function(data, filename){
        if (!data) {
            console.error('Console.save: No data');
            return;
        }
        if (!filename) filename = 'console.json';
        if (typeof data === "object") {
            data = JSON.stringify(data, undefined, 4);
        }
        var blob = new Blob([data], {type: 'text/json'}),
            e = document.createEvent('MouseEvents'),
            a = document.createElement('a');
        a.download = filename;
        a.href = window.URL.createObjectURL(blob);
        a.dataset.downloadurl = ['text/json', a.download, a.href].join(':');
        e.initMouseEvent('click', true, false, window, 0, 0, 0, 0, 0, false, false, false, false, 0, null);
        a.dispatchEvent(e);
    };
})(console)
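To sanity-check the helper before exporting, you can save a small test object first; this should download a test.json file:
//quick test of the console.save helper defined above
console.save({ hello: 'world' }, 'test.json');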
//Common error reporting function
function reportError(){
    console.error('Oops, something went wrong :-(');
}
//Open the database
var dbReq = indexedDB.open('postman');
dbReq.onerror = reportError;
dbReq.onsuccess = function(){
    var db = dbReq.result;
    //Query for all the saved requests
    var requestReq = db.transaction(["requests"],"readwrite").objectStore('requests').getAll();
    requestReq.onerror = reportError;
    requestReq.onsuccess = function(){
        var requests = requestReq.result;
        //Dump them to a file
        console.save(JSON.stringify(requests), 'postman-requests-export.json');
        console.info('Your existing requests have been exported to a file and downloaded to your computer. You will need to copy the contents of that file for the next part');
    };
};
//Switch to the standalone app and open the dev console
//Paste the text from the exported file here (overwriting the empty array)
var data = [];
//Enter the guid/id of the workspace to import into. Run the script with this value blank if you need some help
//finding this value. Also, be sure you don't end up with extra quotes if you copy/paste the value
var ws = '';
//Common error reporting function
function reportError(){
    console.error('Oops, something went wrong :-(');
}
//Open the database
var dbReq = indexedDB.open('postman-app');
dbReq.onerror = reportError;
dbReq.onsuccess = function(){
    var db = dbReq.result;
    if (!data.length) {
        console.error('You did not pass in any exported requests so there is nothing for this script to do. Perhaps you forgot to paste your request data?');
        return;
    }
    if (!ws) {
        var wsReq = db.transaction(["workspace"],"readwrite").objectStore('workspace').getAll();
        wsReq.onerror = reportError;
        wsReq.onsuccess = function(){
            console.error('You did not specify a workspace. Below is a dump of all your workspaces. Grab the guid (ID field) from the workspace you want these requests to show up under and include it at the top of this script');
            console.log(wsReq.result);
        };
        return;
    }
    data.forEach(function(a){
        a.workspace = ws;
        db.transaction(["history"],"readwrite").objectStore('history').add(a);
    });
    console.log('Requests have been imported. Give it a second to finish up and then restart Postman');
};
//Restart Postman
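If you want to verify the import before restarting, a quick check is to count the entries in the history store (a sketch against the same 'postman-app' database used above):
//count the imported history entries
var checkReq = indexedDB.open('postman-app');
checkReq.onerror = reportError;
checkReq.onsuccess = function(){
    var countReq = checkReq.result.transaction(["history"], "readonly").objectStore('history').count();
    countReq.onerror = reportError;
    countReq.onsuccess = function(){
        console.log('history entries: ' + countReq.result);
    };
};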
Note:
1. To use DevTools on your Chrome app you will need to enable the corresponding flag in
chrome://flags
2. Then just right-click and inspect your Chrome Postman app.
3. To use DevTools on your native app, press Ctrl+Shift+I (View -> Show DevTools).

Bluemix - object storage - node.js - pkgcloud - openstack returns 401

I am trying to use pkgcloud (node.js) OpenStack with Bluemix Object Storage, but even when I supply all the requested parameters as on the official page, it always returns 401. I tried using Postman as described on Bluemix and it works.
I created a package which is able to authorize correctly. It is just a copy of pkgcloud, with a few fixes.
EDIT: IT IS WORKING! V2 support was shut down by Bluemix and it only has V3 support now, but I have once again fixed the issues.
Remember to use the newest version (2.0.0).
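For example, you can pin that version in your package.json (assuming the package is installed from npm under the name used in the require below):
"dependencies": {
    "pkgcloud-bluemix-objectstorage": "2.0.0"
}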
So this is how you can use it now:
var pkgcloud = require('pkgcloud-bluemix-objectstorage');
// Create a config object
var config = {};
// Specify Openstack as the provider
config.provider = "openstack";
// Authentication url
config.authUrl = 'https://identity.open.softlayer.com/';
config.region = 'dallas';
// Use the service catalog
config.useServiceCatalog = true;
// true for applications running inside Bluemix, otherwise false
config.useInternal = false;
// projectId as provided in your Service Credentials
config.tenantId = 'xxx';
// userId as provided in your Service Credentials
config.userId = 'xxx';
// username as provided in your Service Credentials
config.username = 'xxx';
// password as provided in your Service Credentials
config.password = 'xxx';
// This is the part which is NOT in the original pkgcloud. This is how it works with the newest version of Bluemix and pkgcloud as of 22.12.2015.
// In reality, anything you put in this config.auth will be sent in the body to the server, so if you need to change anything to make it work, you can. PS: Yes, these are the same credentials as you put into config before.
// I do not fill this in automatically, to keep it transparent.
config.auth = {
    forceUri: "https://identity.open.softlayer.com/v3/auth/tokens", //force the uri to v3; usually you take the base url for authentication and append /v3/auth/tokens (at least in Bluemix)
    interfaceName: "public", //use public for apps outside Bluemix and internal for apps inside Bluemix. There is also an admin interface; I personally do not know what it is for.
    "identity": {
        "methods": [
            "password"
        ],
        "password": {
            "user": {
                "id": "***", //userId
                "password": "***" //userPassword
            }
        }
    },
    "scope": {
        "project": {
            "id": "***" //projectId
        }
    }
};
console.log("config: " + JSON.stringify(config));
// Create a pkgcloud storage client
var storageClient = pkgcloud.storage.createClient(config);
// Authenticate to OpenStack
storageClient.auth(function (error) {
    if (error) {
        console.error("storageClient.auth() : error creating storage client: ", error);
    }
    else {
        // Print the identity object which contains your Keystone token.
        console.log("storageClient.auth() : created storage client: " + JSON.stringify(storageClient._identity));
    }
});
PS: You should be able to connect to this service from outside Bluemix, so you can test it on your localhost.
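Once auth succeeds, a quick smoke test is to list your containers; below is a sketch using the standard pkgcloud getContainers call (whether the fork exposes it unchanged is an assumption on my part):
//after storageClient.auth() succeeds, list the containers in the account
storageClient.getContainers(function (err, containers) {
    if (err) {
        console.error("getContainers() failed: ", err);
        return;
    }
    containers.forEach(function (container) {
        console.log("container: " + container.name);
    });
});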
The lines below describe the old setup for version 1.2.3; read on only if you want to use the v2 version of pkgcloud, which was working with Bluemix before January 2016.
EDIT: It looks like Bluemix shut down support for v2 OpenStack and only supports v3, which is not supported by pkgcloud at all. So this does not work anymore (at least for me).
The problem is actually in the authorization process between pkgcloud and Bluemix: Bluemix expects a slightly different authorization. I created a package which is able to authorize correctly. It is just a copy of pkgcloud, with a few fixes.
And this is how you can use it:
var pkgcloud = require('pkgcloud-bluemix-objectstorage');
// Create a config object
var config = {};
// Specify Openstack as the provider
config.provider = "openstack";
// Authentication url
config.authUrl = 'https://identity.open.softlayer.com/';
config.region = 'dallas';
// Use the service catalog
config.useServiceCatalog = true;
// true for applications running inside Bluemix, otherwise false
config.useInternal = false;
// projectId as provided in your Service Credentials
config.tenantId = 'xxx';
// userId as provided in your Service Credentials
config.userId = 'xxx';
// username as provided in your Service Credentials
config.username = 'xxx';
// password as provided in your Service Credentials
config.password = 'xxx';
// This is the part which is NOT in the original pkgcloud. This is how it worked with Bluemix and pkgcloud as of 22.12.2015.
// In reality, anything you put in this config.auth will be sent in the body to the server, so if you need to change anything to make it work, you can. PS: Yes, these are the same credentials as you put into config before.
// I do not fill this in automatically, to keep it transparent.
config.auth = {
    tenantId: "xxx", //projectId
    passwordCredentials: {
        userId: "xxx", //userId
        password: "xxx" //password
    }
};
console.log("config: " + JSON.stringify(config));
// Create a pkgcloud storage client
var storageClient = pkgcloud.storage.createClient(config);
// Authenticate to OpenStack
storageClient.auth(function (error) {
    if (error) {
        console.error("storageClient.auth() : error creating storage client: ", error);
    }
    else {
        // Print the identity object which contains your Keystone token.
        console.log("storageClient.auth() : created storage client: " + JSON.stringify(storageClient._identity));
    }
});

Headless browsers and Windows Azure Websites

I'm trying to use a headless browser for crawling purposes to add SEO features to an open source project I'm developing.
The project sample site is deployed via Azure Websites.
I tried several ways to get the task working, using different solutions like Selenium .NET (PhantomJSDriver, HTMLUnitDriver, ...) or even the standalone PhantomJS .exe file.
I'm using a headless browser because the site is based on DurandalJS, so it needs to execute scripts and wait for a condition to be true in order to return the generated HTML. For this reason, I can't use things like the WebClient/WebResponse classes or HtmlAgilityPack, which work just fine for non-JavaScript sites.
All the above methods work in my devbox localhost environment, but the problem comes when uploading the site to Azure Websites. When using standalone PhantomJS, the site freezes when accessing the URL endpoint and after a while returns an HTTP 502 error. When using Selenium WebDriver, I'm getting a
OpenQA.Selenium.WebDriverException: Unexpected error. System.Net.WebException: Unable to connect to the remote server ---> System.Net.Sockets.SocketException: No connection could be made because the target machine actively refused it 127.0.0.1:XXXX
I think the problem is with running .exe files in Azure and not with the code. I know it's possible to run .exe files in Azure Cloud Services via WebRoles/WebWorkers, but I need to stay on Azure Websites to keep things simple.
Is it possible to run a headless browser in Azure Websites? Does anyone have experience with this type of situation?
My code for the standalone PhantomJS solution is
//ASP MVC ActionResult
public ActionResult GetHTML(string url)
{
    string appRoot = Server.MapPath("~/");
    var startInfo = new ProcessStartInfo
    {
        Arguments = String.Format("{0} {1}", Path.Combine(appRoot, "Scripts\\seo\\renderHTML.js"), url),
        FileName = Path.Combine(appRoot, "bin\\phantomjs.exe"),
        UseShellExecute = false,
        CreateNoWindow = true,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        RedirectStandardInput = true,
        StandardOutputEncoding = System.Text.Encoding.UTF8
    };
    var p = new Process();
    p.StartInfo = startInfo;
    p.Start();
    string output = p.StandardOutput.ReadToEnd();
    p.WaitForExit();
    ViewData["result"] = output;
    return View();
}
// PhantomJS script
var resourceWait = 300,
    maxRenderWait = 10000;
var page = require('webpage').create(),
    system = require('system'),
    count = 0,
    forcedRenderTimeout,
    renderTimeout;
page.viewportSize = { width: 1280, height: 1024 };
function doRender() {
    console.log(page.content);
    phantom.exit();
}
page.onResourceRequested = function (req) {
    count += 1;
    //console.log('> ' + req.id + ' - ' + req.url);
    clearTimeout(renderTimeout);
};
page.onResourceReceived = function (res) {
    if (!res.stage || res.stage === 'end') {
        count -= 1;
        //console.log(res.id + ' ' + res.status + ' - ' + res.url);
        if (count === 0) {
            renderTimeout = setTimeout(doRender, resourceWait);
        }
    }
};
page.open(system.args[1], function (status) {
    if (status !== "success") {
        //console.log('Unable to load url');
        phantom.exit();
    } else {
        forcedRenderTimeout = setTimeout(function () {
            //console.log(count);
            doRender();
        }, maxRenderWait);
    }
});
and for the Selenium option
public ActionResult GetHTML(string url)
{
    using (IWebDriver driver = new PhantomJSDriver())
    {
        driver.Navigate().GoToUrl(url);
        WebDriverWait wait = new WebDriverWait(driver, TimeSpan.FromSeconds(30));
        IWebElement myDynamicElement = wait.Until<IWebElement>((d) =>
        {
            return d.FindElement(By.CssSelector("#compositionComplete"));
        });
        var content = driver.PageSource;
        driver.Quit();
        return Content(content);
    }
}
Thanks!!
You cannot execute .exe files in the shared website environment; you either have to use cloud services or set up a proper (Azure) virtual machine.
The free shared website tier is really basic and won't cut it when you need more advanced functionality.
See this question and accepted answer for a more elaborate answer: Can we run windowservice or EXE in Azure website or in Virtual Machine?
I am not sure about the shared and basic website environments, but I have successfully run ffmpeg.exe from the standard website environment. Despite that, PhantomJS and even ChromeDriver itself are still not working.
However, I am able to run the Firefox driver successfully. In order to do that,
I copied the latest Firefox directory from my local machine to the website, and the code below worked well.
var binary = new FirefoxBinary("/websitefolder/blabla/firefox.exe");
var driver = new FirefoxDriver(binary, new FirefoxProfile());
driver.Navigate().GoToUrl("http://www.google.com");

Not able to add entities to an Azure storage table in node.js when deployed to cloud?

I am using socket.io in node.js to implement chat functionality in my Azure cloud project. In it I have been adding the user chat history to tables using node.js. It works fine when I run it on my local emulator, but strangely, when I deploy to my Azure cloud it doesn't work, and it doesn't throw any error either, so it's really mind-boggling. Below is my code.
var app = require('express')()
    , server = require('http').createServer(app)
    , sio = require('socket.io')
    , redis = require('redis');
var client = redis.createClient();
var io = sio.listen(server, {origins: '*:*'});
io.set("store", new sio.RedisStore);
process.env.AZURE_STORAGE_ACCOUNT = "account";
process.env.AZURE_STORAGE_ACCESS_KEY = "key";
var azure = require('azure');
var chatTableService = azure.createTableService();
//createTable is a helper (not shown) that creates the table if it does not exist
createTable("ChatUser");
server.listen(4002);
//'socket' comes from an io.sockets.on('connection', ...) handler (not shown)
socket.on('privateChat', function (data) {
    var receiver = data.Receiver;
    console.log(data.Username);
    var chatGUID1 = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
        var r = Math.random()*16|0, v = c == 'x' ? r : (r&0x3|0x8);
        return v.toString(16);
    });
    var chatRecord1 = {
        PartitionKey: data.Receiver,
        RowKey: data.Username,
        ChatID: chatGUID1,
        Username: data.Receiver,
        ChattedWithUsername: data.Username,
        Timestamp: new Date(new Date().getTime())
    };
    console.log(chatRecord1.Timestamp);
    queryEntity(chatRecord1);
});
function queryEntity(record1) {
    chatTableService.queryEntity('ChatUser'
        , record1.PartitionKey
        , record1.RowKey
        , function (error, entity) {
            if (!error) {
                console.log("Entity already exists");
            }
            else {
                insertEntity(record1);
            }
        });
}
function insertEntity(record) {
    chatTableService.insertEntity('ChatUser', record, function (error) {
        if (!error) {
            console.log("Entity inserted");
        }
    });
}
It's working on my local emulator but not on the cloud, and I came across something saying that the DateTime variable of an entity should not be null when creating a record in a cloud table. But I'm pretty sure the way I'm passing the timestamp is fine, isn't it? Any other ideas why it might be working locally but not on the cloud?
EDIT:
I have also been getting this error when running the socket.io server, but in spite of this error the socket.io functionality works fine, so I didn't bother about it. I have no idea what the error means in the first place.
{ [Error: connect ECONNREFUSED]
code: 'ECONNREFUSED',
errno: 'ECONNREFUSED',
syscall: 'connect' }
A couple of things:
You shouldn't need to set Timestamp; the service should populate that automatically when you insert a record.
When running locally, you can set the environment variables to your Windows Azure storage account settings and see if it will successfully write to the table from your developer box. Instead of running in the emulator, just set the environment variables and run the app directly with node.exe (see the sketch after this list).
Are you running in a web role or a worker role? I'm assuming it's a cloud service since you mentioned the emulator. If it's a worker role, maybe add some instrumentation to log to a file to assist in debugging. If it's a web role, you can add an iisnode.yml file in the root of the application, with the following line in the file to enable logging of stdout/stderr:
loggingEnabled: true
This will capture stdout/stderr to an iislog folder under the approot folder on e: or f: of the web role instance. You can remote desktop to the instance and look at the logs to see if your successful-insertion log messages are occurring.
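For the first two points, here is a minimal sketch of what the local test could look like (test-insert.js is a hypothetical file name; the environment variable names match your code, and the record deliberately omits Timestamp so the table service can populate it):
//run locally against the real storage account (no emulator):
//  set AZURE_STORAGE_ACCOUNT=youraccount
//  set AZURE_STORAGE_ACCESS_KEY=yourkey
//  node test-insert.js
var azure = require('azure');
var tables = azure.createTableService();
var record = {
    PartitionKey: 'receiver',
    RowKey: 'sender',
    ChatID: 'some-guid',
    Username: 'receiver',
    ChattedWithUsername: 'sender'
    //no Timestamp property: the table service fills it in automatically
};
tables.insertEntity('ChatUser', record, function (error) {
    console.log(error ? error : 'Entity inserted');
});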
Otherwise, it's not obvious from the code above what's going on. Similar code worked fine for me. Relevant bits for my test code can be found at https://gist.github.com/Blackmist/5326756.
Hope this helps.

Execute a command in Windows Store app

I am developing a Windows Store app that has some URLs. What I would like to achieve is that if I click on one of these, my app switches to Internet Explorer.
Is there any way to do this?
Sándor
// The URI to launch
var uriToLaunch = "http://www.bing.com";
// Create a Uri object from a URI string
var uri = new Windows.Foundation.Uri(uriToLaunch);
Windows.System.Launcher.launchUriAsync(uri).then(
    function (success) {
        if (success) {
            // URI launched
        } else {
            // URI launch failed
        }
    });
Does this do it?
