Actions on Google: Unable to Close Convo in DialogFlow Fulfillment - dialogflow-es

I am on the Actions on Google V2 SDK and using Firebase Functions.
I tried this code:
import * as functions from 'firebase-functions';
const admin = require('firebase-admin');
const serviceAccount = require("../key/secretkey.json");
import {dialogflow, BasicCard, SimpleResponse, List, Carousel} from 'actions-on-google';

admin.initializeApp({
  credential: admin.credential.cert(serviceAccount),
  databaseURL: "https://dburl.com"
});

const getSomethingPromize = admin.database().ref(`/user_list/`).orderByChild('blood_group');
const app = dialogflow();

app.intent('getLocation', async (conv: any, parameters) => {
  console.log("params", parameters);
  return getSomethingPromize.equalTo(parameters.blood_group)
    .once('value')
    .then((snapshot) => {
      const childCount = snapshot.numChildren();
      let message = "";
      let singleRecord;
      switch (childCount) {
        case 0:
          message = `No Record`;
          break;
        case 1:
          singleRecord = snapshot.val();
          singleRecord = singleRecord[Object.keys(singleRecord)[0]];
          message = `1 Record`;
          getBasicCard(conv, singleRecord);
          break;
        default:
          let myItems = {};
          snapshot.forEach(childSnapshot => {
            const entity = childSnapshot.val();
            const state: any = parameters.indian_states;
            if (entity.state.toLowerCase() !== state.toLowerCase()) {
              return;
            }
            myItems[entity.name] = {
              synonyms: [
                entity.name,
              ],
              title: ` ${entity.name} `,
              description: ` Contact : ${entity.phone} ${entity.phone2}, ${entity.city}, ${entity.state}, ${entity.pincode} `,
            };
          });
          message = `Multiple Records`;
          // ISSUE HERE
          conv.close(new List({
            title: `List Title`,
            items: myItems,
          }));
      }
      return getSimpleResponse(conv, parameters, message);
    });
});

function getSimpleResponse(conv, parameters, message = null) {
  let displayMessage = message;
  if (!message) {
    displayMessage = `Sorry! No Record Found`;
  }
  return conv.close(new SimpleResponse({
    text: displayMessage,
    speech: displayMessage
  }));
}

function getBasicCard(conv, singleRecord) {
  return conv.close(new BasicCard({
    text: `${singleRecord.blood_group}, ${singleRecord.state}`,
    subtitle: `Contact : ${singleRecord.phone} ${singleRecord.phone2}, ${singleRecord.city}, ${singleRecord.state}, ${singleRecord.pincode}, ${singleRecord.comment} `,
    title: `${singleRecord.name}`,
    display: 'CROPPED',
  }));
}

export const fulfillment = functions.https.onRequest(app);
The issue: when I try to close the conversation by sending a rich List card, the conversation remains open.
Hence, on Stackdriver I get:
"MalformedResponse: 'final_response' must be set"
I am referencing these docs:
https://developers.google.com/actions/assistant/responses#list

You can only close the conversation with a SimpleResponse. According to the docs, "Your final response must be a single simple response with a 60-character limit on its textToSpeech and displayText values".
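Not part of the original answer, but for reference: a minimal sketch, assuming the same actions-on-google v2 client library, that keeps the List in a mid-conversation turn (conv.ask) and reserves conv.close for a short SimpleResponse. The 'endConversation' intent name here is hypothetical.

// Sketch only: keep the conversation open when sending the List
// (a rich response still needs at least one simple response in the turn),
// then close on a later turn with a short SimpleResponse.
app.intent('getLocation', async (conv, parameters) => {
  // ... build myItems as in the question ...
  conv.ask(new SimpleResponse({
    speech: 'Here are the matching records.',
    text: 'Here are the matching records.',
  }));
  conv.ask(new List({ title: 'List Title', items: myItems }));
});

// A separate intent (hypothetical name) can then end the conversation
// with a simple response under the 60-character limit.
app.intent('endConversation', (conv) => {
  conv.close(new SimpleResponse({
    speech: 'Thanks, goodbye!',
    text: 'Thanks, goodbye!',
  }));
});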

Related

DiscordAPIError[50035]: Invalid Form Body data.content[BASE_TYPE_MAX_LENGTH]: Must be 2000 or fewer in length

I have data in my bot that I pulled from the internet via Node.js.
I wanted to share it, but I get a max-length error because the text is too long. What should I do?
The code I wrote:
const interactionCreate = require("../events/interactionCreate");
const Parser = require('rss-parser');
const { SlashCommandBuilder } = require("discord.js");
const parser = new Parser()
const { Routes } = require('discord-api-types/v9')
const { request } = require('undici')
const { EmbedBuilder } = require('discord.js');
const fs = require("fs")
const jsonToTxt = require("json-to-txt");

module.exports = {
  name: "adsoyad",
  description: "ad soyad bilgi ", // "first name / last name info"
  options: [
    {
      name: "adı",
      description: "sorgulanacak kişi ismi", // "first name of the person to query"
      type: 3,
      required: true,
    },
    {
      name: "soyadı",
      description: "sorgulanacak kişi soyadı", // "last name of the person to query"
      type: 3,
      required: true,
    }
  ],
  run: async (client, interaction) => {
    //const ad = interaction.options.getString("ad");
    //const soyad = interaction.options.getString("soyad");
    //const feed = await parser.parseURL(`http://141.11.127.168/ucretsizapi/adsoyad.php?ad=&soyad=sezer&auth=propenthia`)
    const ad = interaction.options.getString('adı');
    const soyad = interaction.options.getString('soyadı');
    const query = new URLSearchParams({ ad });
    const query2 = new URLSearchParams({ soyad });
    const dictResult = await request(`http://141.11.127.168/ucretsizapi/adsoyad.php?${query}&${query2}&auth=propenthia`);
    //const list = await dictResult.body.text();
    const asa = await dictResult.body.text(8000);
    //fs.writeFileSync("data.json", JSON.stringify(asa));
    // Convert the JSON data to a text file
    //jsonToTxt("data.json", "data.txt");
    interaction.length = 8000
    //fs.writeFileSync(asa)
    await interaction.reply(`${asa}`)
    //const ms = await JSON.stringify(list)
    //var mars = JSON.parse(ms)
  }
}
The error I got:
throw new DiscordAPIError(data, "code" in data ? data.code : data.error, status, method, url, requestData);
^

DiscordAPIError[50035]: Invalid Form Body
data.content[BASE_TYPE_MAX_LENGTH]: Must be 2000 or fewer in length.
    at SequentialHandler.runRequest (C:\Users\trwor\OneDrive\Masaüstü\%64 bot Project\node_modules\@discordjs\rest\dist\index.js:667:15)
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async SequentialHandler.queueRequest (C:\Users\trwor\OneDrive\Masaüstü\%64 bot Project\node_modules\@discordjs\rest\dist\index.js:464:14)
    at async REST.request (C:\Users\trwor\OneDrive\Masaüstü\%64 bot Project\node_modules\@discordjs\rest\dist\index.js:910:22)
    at async ChatInputCommandInteraction.reply (C:\Users\trwor\OneDrive\Masaüstü\%64 bot Project\node_modules\discord.js\src\structures\interfaces\InteractionResponses.js:111:5)
    at async Object.run (C:\Users\trwor\OneDrive\Masaüstü\%64 bot Project\commands\adsoyad.js:64:9) {
  requestBody: {
I need your help, friends. Thank you in advance to anyone who tries to solve it; frankly, I have had a very difficult time with this.
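This question has no answer in the thread, but one common workaround (a sketch, not from the original post) is to split the reply into chunks of at most 2,000 characters, since that limit is enforced by the Discord API itself:

// Sketch: split a long string into <= 2000-character chunks and send them
// as an initial reply plus follow-up messages (discord.js v14 interactions).
function chunkString(text, size = 2000) {
  const chunks = [];
  for (let i = 0; i < text.length; i += size) {
    chunks.push(text.slice(i, i + size));
  }
  return chunks;
}

const chunks = chunkString(asa);
await interaction.reply(chunks[0]);
for (const chunk of chunks.slice(1)) {
  await interaction.followUp(chunk);
}

Alternatively, the full text could be sent as a file attachment rather than as message content.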

Add authentication and Qnamaker to bot

I am trying to build a bot using the Bot Framework SDK.
Objective:
1. Authenticate the user.
2. After authentication, communicate with a QnA Maker knowledge base.
3. Sign the user out if they type "logout".
I am using the bot authentication template from https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/javascript_nodejs/18.bot-authentication and have tried to add the QnA Maker service in the waterfall dialog.
I am facing two issues:
1. With every QnA Maker reply I get the message "You are now logged in".
2. The bot encounters an error.
Source code:
mainDialog.js
const {
  ConfirmPrompt,
  DialogSet,
  DialogTurnStatus,
  OAuthPrompt,
  WaterfallDialog,
} = require("botbuilder-dialogs");
const { LogoutDialog } = require("./logoutDialog");
const { QnAMakerDialog } = require("botbuilder-ai");

const CONFIRM_PROMPT = "ConfirmPrompt";
const MAIN_DIALOG = "MainDialog";
const MAIN_WATERFALL_DIALOG = "MainWaterfallDialog";
const OAUTH_PROMPT = "OAuthPrompt";
const QNAMAKER_BASE_DIALOG = "qnamaker-base-dialog";

const createQnAMakerDialog = (
  knowledgeBaseId,
  endpointKey,
  endpointHostName,
  defaultAnswer
) => {
  let noAnswerActivity;
  if (typeof defaultAnswer === "string") {
    noAnswerActivity = MessageFactory.text(defaultAnswer);
  }
  const qnaMakerDialog = new QnAMakerDialog(
    knowledgeBaseId,
    endpointKey,
    endpointHostName,
    noAnswerActivity
  );
  qnaMakerDialog.id = QNAMAKER_BASE_DIALOG;
  return qnaMakerDialog;
};

class MainDialog extends LogoutDialog {
  constructor(knowledgeBaseId, endpointKey, endpointHostName, defaultAnswer) {
    super(MAIN_DIALOG, process.env.connectionName);
    this.addDialog(
      new OAuthPrompt(OAUTH_PROMPT, {
        connectionName: process.env.connectionName,
        text: "Please Sign In",
        title: "Sign In",
        timeout: 300000,
      })
    );
    this.addDialog(new ConfirmPrompt(CONFIRM_PROMPT));
    this.addDialog(
      new WaterfallDialog(MAIN_WATERFALL_DIALOG, [
        this.promptStep.bind(this),
        this.loginStep.bind(this),
        this.qnaMaker.bind(this),
      ])
    );
    this.addDialog(
      createQnAMakerDialog(
        knowledgeBaseId,
        endpointKey,
        endpointHostName,
        defaultAnswer
      )
    );
    this.initialDialogId = MAIN_WATERFALL_DIALOG;
  }

  /**
   * The run method handles the incoming activity (in the form of a DialogContext) and passes it through the dialog system.
   * If no dialog is active, it will start the default dialog.
   * @param {*} dialogContext
   */
  async run(context, accessor) {
    const dialogSet = new DialogSet(accessor);
    dialogSet.add(this);
    const dialogContext = await dialogSet.createContext(context);
    const results = await dialogContext.continueDialog();
    if (results.status === DialogTurnStatus.empty) {
      await dialogContext.beginDialog(this.id);
    }
  }

  async qnaMaker(stepContext) {
    await stepContext.beginDialog(QNAMAKER_BASE_DIALOG);
  }

  async promptStep(stepContext) {
    return await stepContext.beginDialog(OAUTH_PROMPT);
  }

  async loginStep(stepContext) {
    // Get the token from the previous step. Note that we could also have gotten the
    // token directly from the prompt itself. There is an example of this in the next method.
    const tokenResponse = stepContext.result;
    if (tokenResponse) {
      await stepContext.context.sendActivity("You are now logged in");
      return await stepContext.next();
    }
    await stepContext.context.sendActivity(
      "Login was not successful please try again."
    );
    return await stepContext.endDialog();
  }
}

module.exports.MainDialog = MainDialog;
Bot screenshot (image not included).
GitHub link: https://github.com/chandelsumeet/authBot
Replace your loginStep() with the code block below and check it:

async loginStep(stepContext) {
  // Get the token from the previous step. Note that we could also have gotten the
  // token directly from the prompt itself. There is an example of this in the next method.
  const tokenResponse = stepContext.result;
  if (tokenResponse) {
    await stepContext.context.sendActivity('You are now logged in.');
    return await stepContext.prompt(CONFIRM_PROMPT, 'Would you like to view your token?');
  }
  await stepContext.context.sendActivity('Login was not successful please try again.');
  return await stepContext.endDialog();
}
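Separately, and not part of the answer above: the qnaMaker waterfall step in the posted code never returns the result of beginDialog, and a waterfall step is generally expected to return a DialogTurnResult. A sketch of that step might look like this:

// Sketch (assumption, not from the original answer): return the QnAMaker
// dialog's result so the waterfall tracks the dialog turn correctly.
async qnaMaker(stepContext) {
  return await stepContext.beginDialog(QNAMAKER_BASE_DIALOG);
}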

Firebase cloud function doesn't send push notification with async

My goal is to send a push notification when a user sends a message. I am trying to do this by retrieving all of the push tokens from the Firestore database and sending a multicast message using these tokens each time a new message is added to the Realtime Database.
Works
This first example works. There is no token retrieval; the tokens are hardcoded. I do receive the notifications.
exports.notifyUsers = functions.database.ref('/messages/{messageId}').onCreate((liveSnapshot, context) => {
  const name = context.params.messageId;
  const message = liveSnapshot.val().toString();
  const tokens = [
    "e6erA_qM...",
    "ePU9p_CI...",
  ];
  const payload = {
    notification: {
      title: `New message from ${name}`,
      body: message,
      badge: '1',
      sound: 'default'
    },
    tokens: tokens,
  }
  const res = admin.messaging().sendMulticast(payload);
  console.log(`response: ${res}`);
})
Doesn't work
This doesn't work; I don't receive any notifications.
exports.notifyUsers = functions.database.ref('/messages/{messageId}').onCreate(async (liveSnapshot, context) => {
  const name = context.params.messageId;
  const message = liveSnapshot.val().toString();
  const snapshot = await admin.firestore().collection('users').get();
  const tokens = snapshot.docs.map(doc => doc.data().token);
  const payload = {
    notification: {
      title: `New message from ${name}`,
      body: message,
      badge: '1',
      sound: 'default'
    },
    tokens: tokens,
  }
  const res = await admin.messaging().sendMulticast(payload);
  console.log(`response: ${res}`);
})
I have verified that the tokens retrieved from the database are the same as the hardcoded ones with the following code.
exports.notifyUsers = functions.database.ref('/messages/{messageId}').onCreate(async (liveSnapshot, context) => {
  const hardcodedTokens = [
    "e6erA_qM...",
    "ePU9p_CI...",
  ];
  const snapshot = await admin.firestore().collection('users').get();
  const tokens = snapshot.docs.map(doc => doc.data().token);
  let same = true;
  hardcodedTokens.forEach(el => {
    if (!tokens.includes(el)) {
      same = false;
    }
  });
  console.log(same);
})
This logs true in the firebase cloud functions console.
The function uses Node 12.
I experienced a similar problem recently and solved it by breaking out the Android- and iOS-specific fields according to the Firebase docs:
const payload = {
  notification: {
    title: `New message from ${name}`,
    body: message,
  },
  apns: {
    payload: {
      aps: {
        badge: 1,
      },
    },
  },
  android: {
    notification: {
      notificationCount: 1,
    },
  },
  tokens: tokens,
}
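A possible follow-up, as a sketch with variable names assumed from the question's code: send this message and inspect the per-token results in the returned BatchResponse, which is also how the invalid-argument error shown below was eventually surfaced.

// Sketch: send the multicast message and log any per-token errors.
const res = await admin.messaging().sendMulticast(payload);
res.responses.forEach((r, i) => {
  if (!r.success) {
    console.log(`Token ${tokens[i]} failed: ${r.error.code}`);
  }
});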
The following code works.
async function getTokens() {
  const snapshot = await admin.firestore().collection('users').get();
  return snapshot.docs.map(doc => doc.data().token);
}

exports.notifyUsers = functions.database.ref('/messages/{messageId}').onCreate(async (snapshot, context) => {
  const name = context.params.messageId;
  const message = snapshot.val().toString();
  const tokens = await getTokens();
  const payload = {
    notification: {
      title: `New message from ${name}`,
      body: message,
    },
    tokens: tokens,
  };
  await admin.messaging().sendMulticast(payload);
})
I logged the response like this:
const res = await admin.messaging().sendMulticast(payload);
console.log('response:', JSON.stringify(res));
This logged the following:
response: {"responses":[{"success":false,"error":{"code":"messaging/invalid-argument","message":"Invalid JSON payload received. Unknown name \"sound\" at 'message.notification': Cannot find field."}},{"success":false,"error":{"code":"messaging/invalid-argument","message":"Invalid JSON payload received. Unknown name \"sound\" at 'message.notification': Cannot find field."}}],"successCount":0,"failureCount":2}
Based on this, I believe the problem was the sound argument in the notification part of the payload. It works after removing it.
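If the sound is still wanted, a hedged sketch (building on the platform-specific fields shown in the answer above, not on the poster's final code) moves it into the APNs and Android sections rather than the cross-platform notification:

// Sketch: keep sound/badge out of the cross-platform notification and
// put them in the platform-specific sections instead.
const payload = {
  notification: {
    title: `New message from ${name}`,
    body: message,
  },
  apns: {
    payload: {
      aps: { badge: 1, sound: 'default' },
    },
  },
  android: {
    notification: { sound: 'default', notificationCount: 1 },
  },
  tokens: tokens,
};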

How to implement Waterfall Dialog from within a Dialog depending on the LUIS recognized intent in Nodejs?

I have a main dialog containing a waterfall dialog with only one step, which identifies the intent. Depending on the intent, I decide whether to call another dialog (a waterfall dialog with three steps) or reply to the user directly for intents like Greeting and Cancel. Below is the code for my main dialog:
const chalk = require('chalk')
const path = require('path');
const {
  ComponentDialog,
  DialogSet,
  DialogTurnStatus,
  WaterfallDialog
} = require('botbuilder-dialogs');
const {
  TopLevelDialog,
  TOP_LEVEL_DIALOG
} = require('./topLevelDialog');

const ENV_FILE = path.join(__dirname, '../.env');
require('dotenv').config({
  path: ENV_FILE
});

const {
  LuisRecognizer
} = require('botbuilder-ai');

const MAIN_DIALOG = 'MAIN_DIALOG';
const WATERFALL_DIALOG = 'WATERFALL_DIALOG';
const USER_PROFILE_PROPERTY = 'USER_PROFILE_PROPERTY';

const dispatchRecognizer = new LuisRecognizer({
  applicationId: process.env.LuisAppId,
  endpointKey: process.env.LuisAPIKey,
  endpoint: `https://${process.env.LuisAPIHostName}`
}, {
  includeAllIntents: true,
  includeInstanceData: true
}, true);

class MainDialog extends ComponentDialog {
  constructor(userState) {
    super(MAIN_DIALOG);
    this.userState = userState;
    this.userProfileAccessor = userState.createProperty(USER_PROFILE_PROPERTY);
    this.addDialog(new TopLevelDialog());
    this.addDialog(new WaterfallDialog(WATERFALL_DIALOG, [
      this.initialStep.bind(this),
    ]));
    this.initialDialogId = WATERFALL_DIALOG;
  }

  async run(turnContext, accessor) {
    const dialogSet = new DialogSet(accessor);
    dialogSet.add(this);
    const dialogContext = await dialogSet.createContext(turnContext);
    const results = await dialogContext.continueDialog();
    if (results.status === DialogTurnStatus.empty) {
      await dialogContext.beginDialog(this.id);
    }
  }

  async initialStep(stepContext) {
    const recognizerResult = await dispatchRecognizer.recognize(stepContext.context);
    const intent = LuisRecognizer.topIntent(recognizerResult);
    console.log(chalk.yellow(intent))
    await this.dispatchToTopIntentAsync(stepContext, intent);
    console.log(chalk.green('after dispatch'))
    return await stepContext.endDialog();
  }

  async dispatchToTopIntentAsync(stepContext, intent) {
    console.log(chalk.blue('in dispatch to top intent'))
    switch (intent) {
      case 'Greeting':
        console.log(chalk.red('greeting'))
        return await this.greeting(stepContext);
      default:
        console.log(`Dispatch unrecognized intent: ${ intent }.`);
        await stepContext.context.sendActivity(`Dispatch unrecognized intent: ${ intent }.`);
        return await stepContext.beginDialog(TOP_LEVEL_DIALOG);
    }
  }

  async greeting(stepContext) {
    return await stepContext.context.sendActivity(`Welcome! how can I help you`);
  }
}

module.exports.MainDialog = MainDialog;
module.exports.MAIN_DIALOG = MAIN_DIALOG;
In my case, TOP_LEVEL_DIALOG begins when I type any query related to that intent, and it asks the first step of that waterfall (it asks for a name).
But when I type my name, it comes out of that waterfall dialog and shows the greeting response, because LUIS identifies my name as the Greeting intent.
How do I solve this kind of issue?
I don't believe the issue is with your code, but with your LUIS model. If you type in your name and it returns the Greeting intent, then you should start by reviewing the utterances in your Greeting intent and how your LUIS model is trained/published.
You should navigate to the LUIS.ai site and test your model by entering your name and see what returns.
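Independent of the LUIS model, one code-side detail may be worth checking (my own observation, not part of the answer above): initialStep ends the main dialog even when it has just begun TOP_LEVEL_DIALOG, so the next user message (the name) is re-run through LUIS by run(). A sketch that keeps the child dialog on the stack:

// Sketch: return the result of the dispatched dialog instead of always
// ending the waterfall, so TOP_LEVEL_DIALOG stays active for the next turn.
async initialStep(stepContext) {
  const recognizerResult = await dispatchRecognizer.recognize(stepContext.context);
  const intent = LuisRecognizer.topIntent(recognizerResult);
  return await this.dispatchToTopIntentAsync(stepContext, intent);
}

async dispatchToTopIntentAsync(stepContext, intent) {
  switch (intent) {
    case 'Greeting':
      await this.greeting(stepContext);
      return await stepContext.endDialog();
    default:
      return await stepContext.beginDialog(TOP_LEVEL_DIALOG);
  }
}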

Which code pattern to use for Bot builder sdk 4 nodejs?

I am looking at the code sample 14.nlp-with-dispatch, which makes use of Dispatch, LUIS, and QnA Maker. Before looking at this code, I had downloaded a Node.js sample from the page "Use multiple LUIS and QnA models", which looked like this:
const { BotFrameworkAdapter, BotStateSet, ConversationState, MemoryStorage, TurnContext, UserState } = require('botbuilder');
const { LuisRecognizer, QnAMaker } = require('botbuilder-ai');
const { DialogSet } = require('botbuilder-dialogs');
const restify = require('restify');

// Create server
let server = restify.createServer();
server.listen(process.env.port || process.env.PORT || 3978, function () {
  console.log(`${server.name} listening to ${server.url}`);
});

// Create adapter
const adapter = new BotFrameworkAdapter({
  appId: '',
  appPassword: ''
});

const dispatcher = new LuisRecognizer({
  appId: '',
  subscriptionKey: '',
  serviceEndpoint: '',
  verbose: true
});

// LUIS home automation app
const homeAutomation = new LuisRecognizer({
  appId: '',
  subscriptionKey: '',
  serviceEndpoint: '',
  verbose: true
});

// LUIS `weather app`
const weather = new LuisRecognizer({
  appId: '',
  subscriptionKey: '',
  serviceEndpoint: '',
  verbose: true
});

// The QnA
const faq = new QnAMaker(
  {
    knowledgeBaseId: '',
    endpointKey: '',
    host: ''
  },
  {
    answerBeforeNext: true
  }
);

// Add state middleware
const storage = new MemoryStorage();
const convoState = new ConversationState(storage);
const userState = new UserState(storage);
adapter.use(new BotStateSet(convoState, userState));

// Register some dialogs for usage with the LUIS apps that are being dispatched to
const dialogs = new DialogSet();

// Helper function to retrieve specific entities from LUIS results
function findEntities(entityName, entityResults) {
  let entities = []
  if (entityName in entityResults) {
    entityResults[entityName].forEach(entity => {
      entities.push(entity);
    });
  }
  return entities.length > 0 ? entities : undefined;
}

// Setup dialogs
dialogs.add('HomeAutomation_TurnOn', [
  async (dialogContext, args) => {
    const devices = findEntities('HomeAutomation_Device', args.entities);
    const operations = findEntities('HomeAutomation_Operation', args.entities);
    const state = convoState.get(dialogContext.context);
    state.homeAutomationTurnOn = state.homeAutomationTurnOn ? state.homeAutomationTurnOn + 1 : 1;
    await dialogContext.context.sendActivity(`${state.homeAutomationTurnOn}: You reached the "HomeAutomation_TurnOn" dialog.`);
    if (devices) {
      await dialogContext.context.sendActivity(`Found these "HomeAutomation_Device" entities:\n${devices.join(', ')}`);
    }
    if (operations) {
      await dialogContext.context.sendActivity(`Found these "HomeAutomation_Operation" entities:\n${operations.join(', ')}`);
    }
    await dialogContext.end();
  }
]);

dialogs.add('Weather_GetCondition', [
  async (dialogContext, args) => {
    const locations = findEntities('Weather_Location', args.entities);
    const state = convoState.get(dialogContext.context);
    state.weatherGetCondition = state.weatherGetCondition ? state.weatherGetCondition + 1 : 1;
    await dialogContext.context.sendActivity(`${state.weatherGetCondition}: You reached the "Weather_GetCondition" dialog.`);
    if (locations) {
      await dialogContext.context.sendActivity(`Found these "Weather_Location" entities:\n${locations.join(', ')}`);
    }
    await dialogContext.end();
  }
]);

adapter.use(dispatcher);

// Listen for incoming Activities
server.post('/api/messages', (req, res) => {
  adapter.processActivity(req, res, async (context) => {
    if (context.activity.type === 'message') {
      // The dialog set requires use of a state property accessor to access the dialog state
      const state = convoState.get(context);
      const dc = dialogs.createContext(context, state);
      // Retrieve the LUIS results from our dispatcher LUIS application
      const luisResults = dispatcher.get(context);
      // Extract the top intent from LUIS and use it to select which LUIS application to dispatch to
      const topIntent = LuisRecognizer.topIntent(luisResults);
      const isMessage = context.activity.type === 'message';
      if (isMessage) {
        switch (topIntent) {
          case 'l_homeautomation':
            // Call LUIS.
            const homeAutoResults = await homeAutomation.recognize(context);
            const topHomeAutoIntent = LuisRecognizer.topIntent(homeAutoResults);
            // topHomeAutoIntent = HomeAutomation_TurnOn
            await dc.begin(topHomeAutoIntent, homeAutoResults);
            break;
          case 'l_weather':
            const weatherResults = await weather.recognize(context);
            const topWeatherIntent = LuisRecognizer.topIntent(weatherResults);
            await dc.begin(topWeatherIntent, weatherResults);
            break;
          case 'q_FAQ':
            await faq.answer(context);
            break;
          default:
            await dc.begin('None');
        }
      }
      if (!context.responded) {
        await dc.continue();
        if (!context.responded && isMessage) {
          await dc.context.sendActivity(`Hi! I'm the LUIS dispatch bot. Say something and LUIS will decide how the message should be routed.`);
        }
      }
    }
  });
});
The above code looks very different from what's in the 14.nlp-with-dispatch sample when it comes to designing, defining, and triggering dialogs.
Have new coding patterns been introduced? If so, which should be followed, and which are still supported?
I would recommend using the example set out by the GitHub repo 14.nlp-with-dispatch. The code in the tutorial you linked is rather simplified so that it can be compressed into a single file; it is only meant to show how the routing for multiple LUIS and/or QnA models can be used. The GitHub sample is more robust and shows further customization.
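For orientation, the newer samples are built around an ActivityHandler rather than adapter middleware and per-turn dialog plumbing. A condensed, hedged sketch of that shape (not the 14.nlp-with-dispatch sample verbatim; the real sample adds state management and error handling on top of this):

// Sketch of the ActivityHandler-based pattern used by newer SDK v4 samples.
const { ActivityHandler } = require('botbuilder');
const { LuisRecognizer, QnAMaker } = require('botbuilder-ai');

class DispatchBot extends ActivityHandler {
  constructor(dispatchRecognizer, qnaMaker) {
    super();
    this.dispatchRecognizer = dispatchRecognizer;
    this.qnaMaker = qnaMaker;

    this.onMessage(async (context, next) => {
      // Route each message by the dispatch model's top intent.
      const recognizerResult = await this.dispatchRecognizer.recognize(context);
      const intent = LuisRecognizer.topIntent(recognizerResult);
      if (intent === 'q_FAQ') {
        const answers = await this.qnaMaker.getAnswers(context);
        if (answers.length > 0) {
          await context.sendActivity(answers[0].answer);
        }
      } else {
        await context.sendActivity(`Top intent: ${intent}`);
      }
      await next();
    });
  }
}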
