I’ve been learning NodeJS and have started to rebuild my Bot using v4 of the Bot Framework.
I have a bot up and running in v3 using C#. It takes a QnA Maker answer and renders it as either a Hero Card, a Video Card, or a plain text reply, depending on how many semicolon-separated fields the answer contains.
I'm struggling to replicate this behaviour in NodeJS: splitting the answer string to determine which card type should be sent.
I’ve enclosed the original C# code and my partial attempt using NodeJS.
C#
protected override async Task RespondFromQnAMakerResultAsync(IDialogContext context, IMessageActivity message, QnAMakerResults result)
{
    var answer = result.Answers.First().Answer;
    Activity reply = ((Activity)context.Activity).CreateReply();
    string[] qnaAnswerData = answer.Split(';');
    int dataSize = qnaAnswerData.Length;
    // image and video card
    if (dataSize > 1 && dataSize <= 6)
    {
        var attachment = GetSelectedCard(answer);
        reply.Attachments.Add(attachment);
        await context.PostAsync(reply);
    }
    else
    {
        // await context.Forward(new BasicQnAMakerDialog(), AfterAnswerAsync, message, CancellationToken.None);
        await context.PostAsync(answer);
    }
}
private static Attachment GetSelectedCard(string answer)
{
    int len = answer.Split(';').Length;
    switch (len)
    {
        case 4: return GetHeroCard(answer);
        case 6: return GetVideoCard(answer);
        default: return GetHeroCard(answer);
    }
}
private static Attachment GetHeroCard(string answer)
{
    string[] qnaAnswerData = answer.Split(';');
    string title = qnaAnswerData[0];
    string description = qnaAnswerData[1];
    string url = qnaAnswerData[2];
    string imageURL = qnaAnswerData[3];
    HeroCard card = new HeroCard
    {
        Title = title,
        Subtitle = description
    };
    card.Buttons = new List<CardAction>
    {
        new CardAction(ActionTypes.OpenUrl, "Learn More", value: url)
    };
    card.Images = new List<CardImage>
    {
        new CardImage(url: imageURL)
    };
    return card.ToAttachment();
}
private static Attachment GetVideoCard(string answer)
{
    string[] qnaAnswerData = answer.Split(';');
    string title = qnaAnswerData[0];
    string subtitle = qnaAnswerData[1];
    string description = qnaAnswerData[2];
    string thumbimageurl = qnaAnswerData[3];
    string mediaUrl = qnaAnswerData[4];
    string url = qnaAnswerData[5];
    VideoCard card = new VideoCard
    {
        Title = title,
        Subtitle = subtitle,
        Text = description
    };
    card.Image = new ThumbnailUrl
    {
        Url = thumbimageurl
    };
    card.Media = new List<MediaUrl>
    {
        new MediaUrl()
        {
            Url = mediaUrl
        }
    };
    card.Buttons = new List<CardAction>
    {
        new CardAction()
        {
            Title = "View Full Screen",
            Type = ActionTypes.OpenUrl,
            Value = url
        }
    };
    return card.ToAttachment();
}
Partial NodeJS
async onTurn(turnContext) {
    if (turnContext.activity.type === ActivityTypes.Message) {
        for (let i = 0; i < this.qnaServices.length; i++) {
            // Perform a call to the QnA Maker service to retrieve matching Question and Answer pairs.
            const qnaResults = await this.qnaServices[i].getAnswers(turnContext);
            const qnaCard = qnaResults.includes(';');
            // If an answer was received from QnA Maker, send the answer back to the user and exit.
            if (qnaCard.toString().split(';').length > 3) {
                await turnContext.sendActivity(qnaResults[0].answer);
                await turnContext.sendActivity({
                    text: 'Video Card',
                    attachments: [CardFactory.adaptiveCard(VideoCard)]
                });
            } else if (qnaCard.toString().split(';').length < 3) {
                await turnContext.sendActivity(qnaResults[0].answer);
                await turnContext.sendActivity({
                    text: 'Hero Card',
                    attachments: [CardFactory.adaptiveCard(HeroCard)]
                });
                return;
            }
        }
The CardFactory in the v4 Node SDK has rendering functions for each type of card (HeroCards, VideoCards, AdaptiveCards, etc.). You should use the corresponding function for the type of card you are trying to send. Note also that getAnswers() returns an array of QnA results, so you need to split the answer string itself (qnaResults[0].answer) rather than calling includes on the array. Your code should look like:
async onTurn(turnContext) {
    if (turnContext.activity.type === ActivityTypes.Message) {
        for (let i = 0; i < this.qnaServices.length; i++) {
            // Perform a call to the QnA Maker service to retrieve matching Question and Answer pairs.
            const qnaResults = await this.qnaServices[i].getAnswers(turnContext);
            // If no answer was received, try the next QnA service.
            if (qnaResults.length === 0) {
                continue;
            }
            // Split the answer on semicolons, mirroring the v3 C# logic.
            const fields = qnaResults[0].answer.split(';');
            if (fields.length === 6) {
                // title;subtitle;description;thumbnailUrl;mediaUrl;buttonUrl
                const [title, subtitle, description, thumbnailUrl, mediaUrl, url] = fields;
                await turnContext.sendActivity({
                    attachments: [CardFactory.videoCard(
                        title,
                        [{ url: mediaUrl }],
                        [{ type: 'openUrl', title: 'View Full Screen', value: url }],
                        { subtitle: subtitle, text: description, image: { url: thumbnailUrl } }
                    )]
                });
            } else if (fields.length === 4) {
                // title;description;buttonUrl;imageUrl
                const [title, description, url, imageUrl] = fields;
                await turnContext.sendActivity({
                    attachments: [CardFactory.heroCard(
                        title,
                        description,
                        [imageUrl],
                        [{ type: 'openUrl', title: 'Learn More', value: url }]
                    )]
                });
            } else {
                // Plain answer with no card markup.
                await turnContext.sendActivity(qnaResults[0].answer);
            }
            return;
        }
    }
}
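For reference (hypothetical values, not from the original post), the QnA Maker answers themselves would be authored as plain semicolon-delimited strings, with the field count selecting the card:
// 4 fields -> hero card (title;description;buttonUrl;imageUrl)
const heroAnswer = 'Surface Pro;A versatile 2-in-1;https://example.com/learn-more;https://example.com/surface.png';
// 6 fields -> video card (title;subtitle;description;thumbnailUrl;mediaUrl;buttonUrl)
const videoAnswer = 'Intro;Getting started;A short walkthrough;https://example.com/thumb.png;https://example.com/intro.mp4;https://example.com/watch';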
Check out Using Cards and Using Adaptive Cards for more examples of how to send cards in the v4 BotFramework Node SDK.
Hope this helps!
I am trying to develop an MS Teams bot that sends content to students module (unit) by module. I have created 3 classes:
methods.js = Contains all the methods for sending texts, attachments etc.
teamBot.js = Captures a specific keyword from the users and based on that executes a function.
test.js = Connects the bot with Airtable and sends the content accordingly
I am facing a Cannot perform 'get' on a proxy that has been revoked error. I figured it might be because of the context: I am passing context as a parameter, which I suspect is not the correct way. How can I achieve the result and retain the context between files?
teamsBot.js
const test = require("./test");

class TeamsBot extends TeamsActivityHandler {
    constructor() {
        super();
        // record the likeCount
        this.likeCountObj = { likeCount: 0 };

        this.onMessage(async (context, next) => {
            console.log("Running with Message Activity.");
            let txt = context.activity.text;
            // const removedMentionText = TurnContext.removeRecipientMention(context.activity);
            // if (removedMentionText) {
            //     // Remove the line break
            //     txt = removedMentionText.toLowerCase().replace(/\n|\r/g, "").trim();
            // }
            // Trigger command by IM text
            switch (txt) {
                case "Begin": {
                    await test.sendModuleContent(context);
                }
            }
            // By calling next() you ensure that the next BotHandler is run.
            await next();
        });

        // Listen to MembersAdded event, view https://learn.microsoft.com/en-us/microsoftteams/platform/resources/bot-v3/bots-notifications for more events
        this.onMembersAdded(async (context, next) => {
            const membersAdded = context.activity.membersAdded;
            for (let cnt = 0; cnt < membersAdded.length; cnt++) {
                if (membersAdded[cnt].id) {
                    const card = cardTools.AdaptiveCards.declareWithoutData(rawWelcomeCard).render();
                    await context.sendActivity({ attachments: [CardFactory.adaptiveCard(card)] });
                    break;
                }
            }
            await next();
        });
    }
}
test.js
const ms = require('./methods');

async function sendModuleContent(context) {
    const data = module_text; // fetched from Airtable
    await ms.sendText(context, data);
}

module.exports = { sendModuleContent }; // so teamsBot.js can call test.sendModuleContent
methods.js
const { TeamsActivityHandler, ActivityHandler, MessageFactory } = require('botbuilder');

async function sendText(context, text) {
    console.log("Sending text");
    await context.sendActivity(text);
}

module.exports = { sendText }; // so test.js can call ms.sendText
Refer to this: TypeError: Cannot perform 'get' on a proxy that has been revoked
The TurnContext is wrapped in a revocable proxy that is revoked once the turn ends, so if your Airtable work resolves after that, any context.sendActivity call hits the revoked proxy. The fix is to capture a conversation reference while the context is still valid and send through adapter.continueConversation, which builds a fresh context. Make the following changes to test.js:
const { TurnContext } = require("botbuilder");

var conversationReferences = {};
var adapter;

async function sendModuleContent(context) {
    const data = module_text; // fetched from Airtable
    const currentUser = context.activity.from.id;
    conversationReferences[currentUser] = TurnContext.getConversationReference(context.activity);
    adapter = context.adapter;
    await adapter.continueConversation(conversationReferences[currentUser], async turnContext => {
        await turnContext.sendActivity(data);
    });
}
Is there a YouTube API that can assist with adding scraped video URLs to a YouTube playlist using a bot?
import { Context, Telegraf } from "telegraf";
import { getFilters, Item } from "ytsr";
import { toTitleCase } from "./helpers";
import { SearchResult } from "./interface";
import { youtubeSearch } from "./ytsr";

require("dotenv").config();

if (!process.env.BOT_TOKEN) {
    throw new Error("BOT_TOKEN must be in your .env");
}

const bot = new Telegraf(process.env.BOT_TOKEN!);

bot.on("text", async (ctx: Context) => {
    const text: string = ctx.message?.text
        ? ctx.message?.text
        : ctx.update.message?.text || "";
    console.log("user msg:", text);

    // Expected input: "search string, type, feature, duration, sortBy"
    let details = text.split(",");
    details = details.map((item: string) => item.trim());
    details = details.filter(Boolean);
    console.log(details);

    let searchString = details[0];
    let type = toTitleCase(details[1] || "");
    let feature = details[2];
    let duration = details[3];
    let sortBy = details[4];

    let searchResults = await youtubeSearch(
        searchString,
        type,
        feature,
        duration
    );
    console.log("Our videos ", searchResults);
    /*
    searchResults?.items.forEach((item: Item, index: number) => {
        console.log("res:", item.type);
        let message = "";
        if (item.type == "playlist") {
            message = `Title: ${item.title}`;
            message += `\nUrl: ${item.url}`;
            message += `\nLength: ${item.length}`;
        } else if (item.type == "video") {
            message = `Title: ${item.title}`;
            message += `\nUrl: ${item.url}`;
            message += `\nDuration: ${item.duration}`;
            message += `\nIs Live: ${item.isLive}`;
            message += `\nViews: ${item.views}`;
        }
    });
    */
    return ctx.reply(searchResults!);
});
module.exports = function (robot: { logger: { warning: (arg0: string) => void } }) {
    // OAUTH...
    if (process.env.GOOGLE_OAUTH2_API_KEY == null) {
        robot.logger.warning("Need GOOGLE_OAUTH2_API_KEY");
        return;
    }
    if (process.env.GOOGLE_OAUTH2_API_SECRET == null) {
        robot.logger.warning("Need GOOGLE_OAUTH2_API_SECRET");
        return;
    }
};

export { bot };
What I have done here is scrape YouTube URLs on Telegram using a bot: after searching a keyword, the results are filtered through categories such as video, features, duration, and relevance, and the end result is scraped, filtered data where only the URLs are kept. Is there a way to insert those URLs into a playlist automatically, creating the playlist without me adding the videos manually by hand?
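For what it's worth, the YouTube Data API v3 does expose a playlistItems.insert endpoint that adds a video to a playlist; it requires an OAuth2-authorized client (hence the GOOGLE_OAUTH2_* checks above). A minimal, untested sketch using the googleapis package, with hypothetical names (addToPlaylist, oauth2Client, playlistId):
import { google } from "googleapis";

// Hypothetical helper: adds one scraped video to a playlist.
// `oauth2Client` must already hold a token with the
// https://www.googleapis.com/auth/youtube scope.
async function addToPlaylist(oauth2Client, playlistId, videoId) {
    const youtube = google.youtube({ version: "v3", auth: oauth2Client });
    await youtube.playlistItems.insert({
        part: ["snippet"],
        requestBody: {
            snippet: {
                playlistId,
                resourceId: { kind: "youtube#video", videoId }
            }
        }
    });
}
Each scraped URL would need its video ID extracted first (the v= query parameter, or the path segment for youtu.be links) before calling it.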
I'm in the process of designing a chat bot and trying to find some Node.js sample code and/or documentation on how to implement the Azure Maps service as part of Bot Framework V4. There are many examples of how this is accomplished in V3, but there seem to be no examples of a V4 solution for Node.js. I'm looking to create a step in my botbuilder-dialog flow that would launch a simple "where do we ship it to" location dialog that would guide the user through the dialog and store the address results as part of that user's profile. Any help or advice on this would be appreciated.
Yes, this is doable. I created a class (probably overkill, but oh well) in which I make my API call, with my supplied parameters, to get the map. I decided to use Azure Maps (vs Bing Maps) only because I was curious how it differed. There isn't any reason you couldn't do this with Bing Maps, as well.
In the bot, I am using a component dialog because of how I have the rest of my bot designed. When the dialog ends, it will fall off the stack and return to the parent dialog.
In my scenario, the bot presents the user with a couple of choices. "Send me a map" generates a map and sends it in an activity to the client/user. Anything else sends the user onward, ending the dialog.
You will need to decide how you are getting the user's location. I developed this with Web Chat in mind, so I am getting the geolocation from the browser and returning it to the bot to be used when getMap() is called.
const { ActivityTypes, InputHints } = require('botbuilder');
const fetch = require('node-fetch');

class MapHelper {
    async getMap(context, latitude, longitude) {
        var requestOptions = {
            method: 'GET',
            headers: {
                'Content-Type': 'application/json'
            },
            redirect: 'follow'
        };
        const result = await fetch(`https://atlas.microsoft.com/map/static/png?subscription-key=${ process.env.AZURE_MAPS_KEY }&api-version=1.0&layer=basic&zoom=13&center=${ longitude },${ latitude }&language=en-US&pins=default|al.67|la12 3|lc000000||'You!'${ longitude } ${ latitude }&format=png`, requestOptions)
            .then(response => response.arrayBuffer())
            .then(async result => {
                // Convert the binary response to a base64 data URI and send it as an image attachment.
                const bufferedData = Buffer.from(result, 'binary');
                const base64 = bufferedData.toString('base64');
                const reply = { type: ActivityTypes.Message };
                const attachment = {
                    contentType: 'image/png',
                    contentUrl: `data:image/png;base64,${ base64 }`
                };
                reply.attachments = [attachment];
                await context.sendActivity(reply, null, InputHints.IgnoringInput);
            })
            .catch(error => {
                if (error) throw new Error(error);
            });
        return result;
    };
};

module.exports.MapHelper = MapHelper;
const { ChoicePrompt, ChoiceFactory, ComponentDialog, ListStyle, WaterfallDialog } = require('botbuilder-dialogs');
const { MapHelper } = require('./mapHelper');

const CONFIRM_LOCALE_DIALOG = 'confirmLocaleDialog';
const CHOICE_PROMPT = 'confirmPrompt';

class ConfirmLocaleDialog extends ComponentDialog {
    constructor() {
        super(CONFIRM_LOCALE_DIALOG);

        this.addDialog(new ChoicePrompt(CHOICE_PROMPT))
            .addDialog(new WaterfallDialog(CONFIRM_LOCALE_DIALOG, [
                this.askLocationStep.bind(this),
                this.getMapStep.bind(this)
            ]));

        this.initialDialogId = CONFIRM_LOCALE_DIALOG;
    }

    async askLocationStep(stepContext) {
        const choices = ['Send me a map', "I'll have none of this nonsense!"];
        return await stepContext.prompt(CHOICE_PROMPT, {
            prompt: 'Good sir, may I pinpoint you on a map?',
            choices: ChoiceFactory.toChoices(choices),
            style: ListStyle.suggestedAction
        });
    }

    async getMapStep(stepContext) {
        const { context, context: { activity } } = stepContext;
        const text = activity.text.toLowerCase();
        if (text === 'send me a map') {
            const { latitude, longitude } = activity.channelData;
            const mapHelper = new MapHelper();
            await mapHelper.getMap(context, latitude, longitude);
            const message = 'Thanks for sharing!';
            await stepContext.context.sendActivity(message);
            return await stepContext.endDialog();
        } else {
            await stepContext.context.sendActivity('No map for you!');
            return await stepContext.endDialog();
        }
    }
}

module.exports.ConfirmLocaleDialog = ConfirmLocaleDialog;
module.exports.CONFIRM_LOCALE_DIALOG = CONFIRM_LOCALE_DIALOG;
Hope this helps!
---- EDIT ----
Per request, location data can be obtained from the browser using the below method. It is, of course, dependent on the user granting access to location data.
navigator.geolocation.getCurrentPosition(async (position) => {
    const { latitude, longitude } = position.coords;
    // Do something with the data;
    console.log(latitude, longitude);
});
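To get those coordinates from the browser into activity.channelData, which getMapStep reads above, one option with Web Chat is a store middleware that stamps them onto every outgoing activity. This is a minimal sketch, not part of the original answer, assuming Web Chat's createStore and latitude/longitude variables captured by the snippet above:
// Merge the captured coordinates into every outgoing activity's
// channelData before it is posted to the bot.
const store = window.WebChat.createStore({}, () => next => action => {
    if (action.type === 'DIRECT_LINE/POST_ACTIVITY') {
        action.payload.activity.channelData = {
            ...action.payload.activity.channelData,
            latitude,
            longitude
        };
    }
    return next(action);
});
// Pass `store` into window.WebChat.renderWebChat alongside directLine.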
Is there any way I can get the historian for a particular participant in hyperledger-composer using the Node API?
I am developing an application based on hyperledger-composer using Node APIs. I want to show the history of transactions of a particular participant in his/her profile. I have created the permission.acl for that, and it works fine in Playground. But when I access the historian from the Node API, it returns the complete historian of the network; I don't know how to filter it for a particular participant.
You can return results from REST API calls to the calling client application since v0.20, so something like the following would work (not tested, but you get the idea). NOTE: you could also call a generated REST endpoint directly (e.g. GET /api/Trader, or whatever endpoints you create for your own business network; the example below uses trade-network) with your parameter, rather than the READ-ONLY transaction processor endpoint described below, particularly for returning larger result sets to your client application. See more on this in the docs.
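For the direct-endpoint alternative, a sketch (untested, assuming the default composer-rest-server on localhost:3000 and the trade-network Trader type):
const rp = require('request-promise');

// Hypothetical direct call to a generated composer-rest-server endpoint:
// GET /api/Trader/{id} fetches a single Trader by its identifier.
async function getTrader(tradeId) {
    return rp({
        method: 'GET',
        uri: `http://localhost:3000/api/Trader/${tradeId}`,
        json: true
    });
}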
NODE JS Client using APIs:
const BusinessNetworkConnection = require('composer-client').BusinessNetworkConnection;
const rp = require('request-promise');

this.bizNetworkConnection = new BusinessNetworkConnection();
this.cardName = 'admin@mynet';
this.businessNetworkIdentifier = 'mynet';

this.bizNetworkConnection.connect(this.cardName)
    .then((result) => {
        // You can do ANYTHING HERE eg.
    })
    .catch((error) => {
        throw error;
    });

// set up my read-only transaction object - find the history of a particular Participant - note it could equally be an Asset instead!
var obj = {
    "$class": "org.example.trading.MyPartHistory",
    "tradeId": "P1"
};

async function callPartHistory() {
    var options = {
        method: 'POST',
        uri: 'http://localhost:3000/api/MyPartHistory',
        body: obj,
        json: true
    };
    let results = await rp(options);
    // console.log("Return value from REST API is " + results);
    console.log(" ");
    console.log(`PARTICIPANT HISTORY for Participant ID: ${results[0].tradeId} is: `);
    console.log("=============================================");
    for (const part of results) {
        console.log(`${part.tradeId} ${part.name}`);
    }
}

// Main
callPartHistory();
MODEL FILE
@commit(false)
@returns(Trader[])
transaction MyPartHistory {
  o String tradeId
}
READ-ONLY TRANSACTION PROCESSOR CODE (in 'logic.js'):
/**
 * Sample read-only transaction
 * @param {org.example.trading.MyPartHistory} tx
 * @returns {org.example.trading.Trader[]} All trxns
 * @transaction
 */
async function participantHistory(tx) {
    const partId = tx.tradeId;
    const nativeSupport = tx.nativeSupport;
    // const partRegistry = await getParticipantRegistry('org.example.trading.Trader')
    const nativeKey = getNativeAPI().createCompositeKey('Asset:org.example.trading.Trader', [partId]);
    const iterator = await getNativeAPI().getHistoryForKey(nativeKey);
    let results = [];
    let res = { done: false };
    while (!res.done) {
        res = await iterator.next();
        if (res && res.value && res.value.value) {
            let val = res.value.value.toString('utf8');
            if (val.length > 0) {
                console.log("#debug val is " + val);
                results.push(JSON.parse(val));
            }
        }
        if (res && res.done) {
            try {
                await iterator.close();
            } catch (err) {
            }
        }
    }
    var newArray = [];
    for (const item of results) {
        newArray.push(getSerializer().fromJSON(item));
    }
    console.log("#debug the results to be returned are as follows: ");
    return newArray; // returns something to my NodeJS client (called via REST API)
}
I'm having fun with the Alexa API, so I downloaded a Hello World example from here
https://developer.amazon.com/appsandservices/solutions/alexa/alexa-skills-kit/getting-started-guide
I managed to make some minor changes and have Alexa say other things.
But now I want a real-world example working, so I tried to get the latest tweet for a user.
So I coded a Twitter function, and it works; I see the tweet in my console.
Besides, the downloaded example on its own works just fine too.
But now, when I try to combine them by adding my Twitter function into the Alexa example, it throws the following error when trying to print the value (if I don't print it, it doesn't break):
{"errorMessage": "Exception: ReferenceError: data is not defined"}
Here is the code; the modified function is getWelcomeResponse().
// Route the incoming request based on type (LaunchRequest, IntentRequest,
// etc.) The JSON body of the request is provided in the event parameter.
exports.handler = function (event, context) {
    try {
        console.log("event.session.application.applicationId=" + event.session.application.applicationId);
        /**
         * Uncomment this if statement and populate with your skill's application ID to
         * prevent someone else from configuring a skill that sends requests to this function.
         */
        /*
        if (event.session.application.applicationId !== "amzn1.echo-sdk-ams.app.[unique-value-here]") {
            context.fail("Invalid Application ID");
        }
        */
        if (event.session.new) {
            onSessionStarted({requestId: event.request.requestId}, event.session);
        }
        if (event.request.type === "LaunchRequest") {
            onLaunch(event.request,
                event.session,
                function callback(sessionAttributes, speechletResponse) {
                    context.succeed(buildResponse(sessionAttributes, speechletResponse));
                });
        } else if (event.request.type === "IntentRequest") {
            onIntent(event.request,
                event.session,
                function callback(sessionAttributes, speechletResponse) {
                    context.succeed(buildResponse(sessionAttributes, speechletResponse));
                });
        } else if (event.request.type === "SessionEndedRequest") {
            onSessionEnded(event.request, event.session);
            context.succeed();
        }
    } catch (e) {
        context.fail("Exception: " + e);
    }
};
/**
 * Called when the session starts.
 */
function onSessionStarted(sessionStartedRequest, session) {
    console.log("onSessionStarted requestId=" + sessionStartedRequest.requestId
        + ", sessionId=" + session.sessionId);
}

/**
 * Called when the user launches the skill without specifying what they want.
 */
function onLaunch(launchRequest, session, callback) {
    console.log("onLaunch requestId=" + launchRequest.requestId
        + ", sessionId=" + session.sessionId);
    // Dispatch to your skill's launch.
    getWelcomeResponse(callback);
}

/**
 * Called when the user specifies an intent for this skill.
 */
function onIntent(intentRequest, session, callback) {
    console.log("onIntent requestId=" + intentRequest.requestId
        + ", sessionId=" + session.sessionId);
    var intent = intentRequest.intent,
        intentName = intentRequest.intent.name;
    // Dispatch to your skill's intent handlers
    if ("MyColorIsIntent" === intentName) {
        setColorInSession(intent, session, callback);
    } else if ("WhatsMyColorIntent" === intentName) {
        getColorFromSession(intent, session, callback);
    } else if ("HelpIntent" === intentName) {
        getWelcomeResponse(callback);
    } else {
        throw "Invalid intent";
    }
}

/**
 * Called when the user ends the session.
 * Is not called when the skill returns shouldEndSession=true.
 */
function onSessionEnded(sessionEndedRequest, session) {
    console.log("onSessionEnded requestId=" + sessionEndedRequest.requestId
        + ", sessionId=" + session.sessionId);
    // Add cleanup logic here
}
// --------------- Functions that control the skill's behavior -----------------------
function getWelcomeResponse(callback) {
    var twit = require('twitter'),
        twitter = new twit({
            consumer_key: '***',
            consumer_secret: '***',
            access_token_key: '***',
            access_token_secret: '***'
        });
    //var count = 0;
    var util = require('util');
    params = {
        screen_name: 'kilinkis', // the user id passed in as part of the route
        count: 1 // how many tweets to return
    };
    // request data
    twitter.get('https://api.twitter.com/1.1/statuses/user_timeline.json', params, function (data) {
        console.log(util.inspect(data[0].text));
    });
    // If we wanted to initialize the session to have some attributes we could add those here.
    var sessionAttributes = {};
    var cardTitle = "Welcome";
    /*var speechOutput = "Welcome to the Alexa Skills Kit sample, "
        + "Please tell me your favorite color by saying, "
        + "my favorite color is red";*/
    //var speechOutput=util.inspect(data[0].text);
    var speechOutput = data[0].text; // throws: `data` only exists inside the twitter.get callback
    // If the user either does not reply to the welcome message or says something that is not
    // understood, they will be prompted again with this text.
    var repromptText = "Please tell me your favorite color by saying, "
        + "my favorite color is red";
    var shouldEndSession = true;
    callback(sessionAttributes,
        buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession));
}
/**
 * Sets the color in the session and prepares the speech to reply to the user.
 */
function setColorInSession(intent, session, callback) {
    var cardTitle = intent.name;
    var favoriteColorSlot = intent.slots.Color;
    var repromptText = "";
    var sessionAttributes = {};
    var shouldEndSession = false;
    var speechOutput = "";
    if (favoriteColorSlot) {
        favoriteColor = favoriteColorSlot.value;
        sessionAttributes = createFavoriteColorAttributes(favoriteColor);
        speechOutput = "I now know your favorite color is " + favoriteColor + ". You can ask me "
            + "your favorite color by saying, what's my favorite color?";
        repromptText = "You can ask me your favorite color by saying, what's my favorite color?";
    } else {
        speechOutput = "I'm not sure what your favorite color is, please try again";
        repromptText = "I'm not sure what your favorite color is, you can tell me your "
            + "favorite color by saying, my favorite color is red";
    }
    callback(sessionAttributes,
        buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession));
}

function createFavoriteColorAttributes(favoriteColor) {
    return {
        favoriteColor: favoriteColor
    };
}

function getColorFromSession(intent, session, callback) {
    var cardTitle = intent.name;
    var favoriteColor;
    var repromptText = null;
    var sessionAttributes = {};
    var shouldEndSession = false;
    var speechOutput = "";
    if (session.attributes) {
        favoriteColor = session.attributes.favoriteColor;
    }
    if (favoriteColor) {
        speechOutput = "Your favorite color is " + favoriteColor + ", goodbye";
        shouldEndSession = true;
    } else {
        speechOutput = "I'm not sure what your favorite color is, you can say, my favorite color "
            + " is red";
    }
    // Setting repromptText to null signifies that we do not want to reprompt the user.
    // If the user does not respond or says something that is not understood, the session
    // will end.
    callback(sessionAttributes,
        buildSpeechletResponse(intent.name, speechOutput, repromptText, shouldEndSession));
}
// --------------- Helpers that build all of the responses -----------------------
function buildSpeechletResponse(title, output, repromptText, shouldEndSession) {
    return {
        outputSpeech: {
            type: "PlainText",
            text: output
        },
        card: {
            type: "Simple",
            title: "SessionSpeechlet - " + title,
            content: "SessionSpeechlet - " + output
        },
        reprompt: {
            outputSpeech: {
                type: "PlainText",
                text: repromptText
            }
        },
        shouldEndSession: shouldEndSession
    };
}

function buildResponse(sessionAttributes, speechletResponse) {
    return {
        version: "1.0",
        sessionAttributes: sessionAttributes,
        response: speechletResponse
    };
}
Can someone please guide me on what's wrong? It's probably a scope issue, but I'm not sure.
Move your callback inside the twitter get function. Then your callback will be called on a successful GET from the Twitter API, and you will have access to the data object. You will probably want to add a failure case as well and include a context.fail().
If you need to, you can also update the timeout parameter under the configuration tab of the AWS console; it's under advanced settings. Also, it's often useful to take Alexa out of the equation when debugging and just get the Twitter API piece working first.
// request data
twitter.get('https://api.twitter.com/1.1/statuses/user_timeline.json', params, function (data) {
    console.log(util.inspect(data[0].text));
    // If we wanted to initialize the session to have some attributes we could add those here.
    var sessionAttributes = {};
    var cardTitle = "Welcome";
    var speechOutput = data[0].text;
    var repromptText = "";
    var shouldEndSession = true;
    callback(sessionAttributes,
        buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession));
});
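One caveat: depending on the version of the twitter package, the callback may actually be invoked as function(error, data, response), so the failure case mentioned above could look something like this. This is a sketch, not tested against a specific client version, and it assumes you also pass the Lambda context down into getWelcomeResponse (it is not currently a parameter):
// request data, with an explicit failure case as suggested above
twitter.get('https://api.twitter.com/1.1/statuses/user_timeline.json', params, function (error, data) {
    if (error) {
        // Fail the Lambda invocation so the error shows up in the logs.
        context.fail("Twitter API error: " + util.inspect(error));
        return;
    }
    var sessionAttributes = {};
    var cardTitle = "Welcome";
    var speechOutput = data[0].text;
    var repromptText = "";
    var shouldEndSession = true;
    callback(sessionAttributes,
        buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession));
});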