My Alexa intent has no slot defined, no matter what I ask - node.js

I'm using a custom Lambda function to handle my custom Alexa skill. I have a single slot, which I want to capture. Unfortunately, the intent slot is never populated with any value, and seems to be getting ignored, despite being in the request.
Here is my code (note: the JSON.stringify is there to help me debug):
'use strict';
var Alexa = require("alexa-sdk");
exports.handler = function(event, context, callback) {
var alexa = Alexa.handler(event, context);
alexa.registerHandlers(handlers);
alexa.execute();
};
/**
 * Intent handlers for the skill.
 *
 * Fixes:
 *  - The previous version spoke JSON.stringify(slot-object), which read
 *    the raw {"name":"Place","confirmationStatus":"NONE"} structure
 *    aloud. A slot object is always present in the request even when
 *    the user never uttered it; only its `.value` property carries the
 *    spoken city, so read that and fall back to a generic phrase when
 *    it is absent.
 *  - LaunchRequest delegates to 'Snow', but a LaunchRequest carries no
 *    `request.intent`, so the old code would throw there; the intent
 *    access is now guarded.
 */
var handlers = {
  'LaunchRequest': function () {
    // Nothing to parse on launch; reuse the Snow handler (which must
    // therefore tolerate a request without an intent).
    this.emit('Snow');
  },
  'Snow': function () {
    var intent = this.event.request.intent;
    var placeSlot = (intent && intent.slots) ? intent.slots.Place : undefined;
    // Only a filled slot has `.value`; otherwise use a neutral phrase.
    var place = (placeSlot && placeSlot.value) ? placeSlot.value : 'the resort';
    var text = 'Over the next 9 days you can expect 12 centimeters of snowfall in ' + place + '. The upper piste has a depth of 75 centimeters and the lower piste has a depth of 35 centimeters. All 19 lifts are currently open.';
    this.emit(':tell', text);
  }
};
I have an Intent called "Snow" and a slot called "Place". Here is my interaction model:
{
"interactionModel": {
"languageModel": {
"invocationName": "ski club",
"intents": [
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "Snow",
"slots": [
{
"name": "Place",
"type": "AMAZON.City"
}
],
"samples": [
"how the powder is",
"what the powder is like",
"how the snow is",
"what the snow is like",
"snow conditions",
"snow condition",
"snow fall",
"snowfall",
"powder levels",
"powder level",
"snow level",
"powder depth",
"snow levels",
"snow depth",
"powder",
"snow",
"the snow"
]
}
],
"types": []
},
"dialog": {
"intents": [
{
"name": "Snow",
"confirmationRequired": false,
"prompts": {},
"slots": [
{
"name": "Place",
"type": "AMAZON.City",
"confirmationRequired": false,
"elicitationRequired": true,
"prompts": {
"elicitation": "Elicit.Slot.115004419453.153941565683"
}
}
]
}
]
},
"prompts": [
{
"id": "Elicit.Slot.115004419453.153941565683",
"variations": [
{
"type": "PlainText",
"value": "Where would you like to know about the snow?"
}
]
}
]
}
}
I would expect to be able to ask:
Alexa ask my app how the snow is in Morzine
And I would expect an answer back with the static text from my Lambda function, including the name inserted. However, I'm getting the following:
{
"body": {
"version": "1.0",
"response": {
"outputSpeech": {
"type": "SSML",
"ssml": "<speak> Over the next 9 days you can expect 12 centimeters of snowfall in {\"name\":\"Place\",\"confirmationStatus\":\"NONE\"}. The upper piste has a depth of 75 centimeters and the lower piste has a depth of 35 centimeters. All 19 lists are currently open. </speak>"
},
"shouldEndSession": true
},
"sessionAttributes": {},
"userAgent": "ask-nodejs/1.0.25 Node/v8.10.0"
}
}
It seems Place never has a value.
I get the exact same response if I omit the Place slot entirely:
Alexa ask my app how the snow is
Here I would expect to be asked to give a slot.
Here is the JSON Input from Alexa (I've redacted some keys):
{
"version": "1.0",
"session": {
"new": true,
"sessionId": "amzn1.echo-api.session.***",
"application": {
"applicationId": "amzn1.ask.skill.***"
},
"user": {
"userId": "amzn1.ask.account.***"
}
},
"context": {
"System": {
"application": {
"applicationId": "amzn1.ask.skill.***"
},
"user": {
"userId": "amzn1.ask.account.***"
},
"device": {
"deviceId": "amzn1.ask.device.***",
"supportedInterfaces": {}
},
"apiEndpoint": "https://api.eu.amazonalexa.com",
"apiAccessToken": "***.***.***
},
"Viewport": {
"experiences": [
{
"arcMinuteWidth": 246,
"arcMinuteHeight": 144,
"canRotate": false,
"canResize": false
}
],
"shape": "RECTANGLE",
"pixelWidth": 1024,
"pixelHeight": 600,
"dpi": 160,
"currentPixelWidth": 1024,
"currentPixelHeight": 600,
"touch": [
"SINGLE"
]
}
},
"request": {
"type": "IntentRequest",
"requestId": "amzn1.echo-api.request.92fc43d8-0dc2-4a08-a31a-70a031e2fef7",
"timestamp": "2018-11-16T16:51:17Z",
"locale": "en-GB",
"intent": {
"name": "Snow",
"confirmationStatus": "NONE",
"slots": {
"Place": {
"name": "Place",
"confirmationStatus": "NONE"
}
}
},
"dialogState": "STARTED"
}
}

My sample utterances need to include the slot itself; it isn't assumed to be there like I thought. This is done by appending {Place} to the samples, e.g. "how the snow is in {Place}".

Related

Receive an error after clicking carousel item

We have an action running in production just fine. It has just started returning an error after trying to click or invoke an item from a carousel.
Steps to reproduce are as follows
Talk to Action
Carousel loads fine
Invoke any item in Carousel via click or voice
receive the error
Sorry, {action} is not responding at this time. Please try again later.
Here is a snippet of how it is being built.
There is nothing reported as an error in stackdriver logs
/**
 * @description Build the Carousel: sort the list items ascending by
 * their Google index, then keep only the items that have a googleIndex
 * set.
 * @private
 * @static
 * @param {CardListItem[]} listItems - Array of CardListItem to transform
 * @returns {Carousel}
 * Returns a Carousel
 * @memberof TemplateBuilder
 */
private static getCarousel(listItems: CardListItem[]): Carousel {
const items = [] as GoogleActionsV2UiElementsCarouselSelectCarouselItem[];
listItems = this.sortListTemplateListItemsByGoogleIndexAscending(listItems);
for (const listItem of listItems) {
// NOTE(review): a falsy googleIndex (0, undefined) is skipped here —
// confirm indices are 1-based, otherwise index 0 silently disappears
// from the carousel.
if (listItem.googleIndex) {
items.push(this.getCarouselItem(listItem));
}
}
return new Carousel({
items,
display: 'DEFAULT',
});
}
/**
 * @description Build a single CarouselItem from template data.
 * @private
 * @static
 * @param {CardListItem} data - Template data of type ListTemplateData
 * @returns {GoogleActionsV2UiElementsCarouselSelectCarouselItem}
 * Returns a Carousel Item
 * @memberof TemplateBuilder
 */
private static getCarouselItem(data: CardListItem): GoogleActionsV2UiElementsCarouselSelectCarouselItem {
// title is null when the item carries no text content.
const title = data.textContent ? this.convertToPlainText(data.textContent) : null;
// assumes data.image is always present (string URL or object) — TODO
// confirm; this line throws if it is undefined.
const imageUrl = typeof data.image === 'string' ? data.image : data.image.imageUrl;
return {
optionInfo: {
// The option key is what actions.intent.OPTION echoes back on
// selection; falls back to the item token when no handler is named.
key: data.intentHandler || data.token,
synonyms: [],
} as GoogleActionsV2OptionInfo,
image: new Image({
url: this.getImageURL(imageUrl),
// NOTE(review): alt can be null here (when title is null) — confirm
// the Assistant accepts a carousel item without accessibilityText;
// this is worth checking against the production error.
alt: title,
height: null,
width: null,
}),
title,
} as GoogleActionsV2UiElementsCarouselSelectCarouselItem;
}
Response from the web hook
{
"payload": {
"google": {
"expectUserResponse": true,
"systemIntent": {
"intent": "actions.intent.OPTION",
"data": {
"#type": "type.googleapis.com/google.actions.v2.OptionValueSpec",
"carouselSelect": {
"items": [
{
"optionInfo": {
"key": "AccountBalanceIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-account.jpg",
"accessibilityText": "Account Balance",
"height": null,
"width": null
},
"title": "Account Balance"
},
{
"optionInfo": {
"key": "UsageCostIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-usage.jpg",
"accessibilityText": "Energy Usage",
"height": null,
"width": null
},
"title": "Energy Usage"
},
{
"optionInfo": {
"key": "BillAmountIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-bill.jpg",
"accessibilityText": "Bill History",
"height": null,
"width": null
},
"title": "Bill History"
},
{
"optionInfo": {
"key": "EnergyInsightsIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-insights.jpg",
"accessibilityText": "Energy Insights",
"height": null,
"width": null
},
"title": "Energy Insights"
},
{
"optionInfo": {
"key": "BillDueDateIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-bill-due-date.jpg",
"accessibilityText": "Bill Due Date",
"height": null,
"width": null
},
"title": "Bill Due Date"
},
{
"optionInfo": {
"key": "EnergyAlertIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-alert.jpg",
"accessibilityText": "Energy Alert",
"height": null,
"width": null
},
"title": "Energy Alert"
},
{
"optionInfo": {
"key": "EnergyTipIntent",
"synonyms": []
},
"image": {
"url": "https://aglvoiceprod.blob.core.windows.net/images/google/thumb-tip.jpg",
"accessibilityText": "Energy Tip",
"height": null,
"width": null
},
"title": "Energy Tip"
}
],
"imageDisplayOptions": "DEFAULT"
}
}
},
"richResponse": {
"items": [
{
"simpleResponse": {
"textToSpeech": "<speak>Hi Brent,<break time=\"200ms\"/> welcome to <say-as interpret-as=\"spell-out\">AGL</say-as>.<break time=\"400ms\"/> With our latest update,<break time=\"200ms\"/> you can now set up Energy Alerts.<break time=\"300ms\"/> For other ways that I can help,<break time=\"200ms\"/> say \"more information\".<break time=\"350ms\"/> How can I help you today?</speak>",
"displayText": "Hi Brent, welcome to AGL. With our latest update, you can now set up Energy Alerts. For other ways that I can help, say \"more information\". How can I help you today?"
}
}
],
"suggestions": [
{
"title": "Monthly Billing"
},
{
"title": "eBilling"
},
{
"title": "Payment Extension"
},
{
"title": "Feedback"
},
{
"title": "Help"
}
]
}
}
},
"outputContexts": [
{
"name": "projects/agl-prod-603de/agent/sessions/ABwppHESxXC8wRCVt2-hskWfsZwxEoH54G6jm_yTj2oIiETa1V3EbgRGj86H7Ys5bEKG0YrzkL0fE9Mpnhk/contexts/_actions_on_google",
"lifespanCount": 99,
"parameters": {
"data": "{\"sessionLaunched\":true,\"skippedIntents\":[],\"possibleIntents\":[],\"followUp\":{\"yes\":{\"data\":null,\"intent\":\"Unhandled\"},\"no\":{\"data\":null,\"intent\":\"Unhandled\"},\"fuelType\":{\"elec\":{\"data\":null,\"intent\":\"Unhandled\"},\"gas\":{\"data\":null,\"intent\":\"Unhandled\"},\"both\":{\"data\":null,\"intent\":\"Unhandled\"}},\"number\":{\"data\":null,\"intent\":\"Unhandled\",\"slotName\":null},\"period\":{\"data\":null,\"intent\":\"Unhandled\",\"slotName\":null}},\"lastIntent\":\"LaunchRequest\"}"
}
}
]
}
flow in simulator

Facing an error while building a custom skill for Amazon Alexa

I am trying to build a basic custom Alexa skill. I have created an intent schema and am using an AWS Lambda function as the endpoint.
My Intent schema:
{
"interactionModel": {
"languageModel": {
"invocationName": "toit brewpub",
"modelConfiguration": {
"fallbackIntentSensitivity": {
"level": "LOW"
}
},
"intents": [
{
"name": "AMAZON.FallbackIntent",
"samples": []
},
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
},
{
"name": "GetClosingTime",
"slots": [],
"samples": [
"what time do you close",
"when is the last order",
"till what time are you open",
"What time does the pub close"
]
},
{
"name": "GetPriceOfBeer",
"slots": [
{
"name": "beer",
"type": "BEERS"
}
],
"samples": [
"how much is {beer}",
"what is the price of {beer}"
]
}
],
"types": [
{
"name": "BEERS",
"values": [
{
"name": {
"value": "Toit Red"
}
},
{
"name": {
"value": "Tiot Weiss"
}
},
{
"name": {
"value": "Basmati Blonde"
}
},
{
"name": {
"value": "Tintin Toit"
}
},
{
"name": {
"value": "IPA"
}
},
{
"name": {
"value": "Dark Knight"
}
}
]
}
]
}
}
}
I am using Node.js v10.x for my Lambda function, which has been built using the Alexa-Skills-NodeJS-Fact-kit. The region for my AWS Lambda is US East (N. Virginia).
Below is the request I receive when I talk to my Test Simulator:
{
"version": "1.0",
"session": {
"new": false,
"sessionId": "amzn1.echo-api.session.fd1c5315-ecf8-413f-ba25-e54bd6ae316a",
"application": {
"applicationId": "amzn1.ask.skill.72615503-5f38-4baf-b0dd-cd6edd3b6dfd"
},
"user": {
"userId": ""
}
},
"context": {
"System": {
"application": {
"applicationId": "amzn1.ask.skill.72615503-5f38-4baf-b0dd-cd6edd3b6dfd"
},
"user": {
"userId": ""
},
"device": {
"deviceId": "",
"supportedInterfaces": {}
},
"apiEndpoint": "https://api.eu.amazonalexa.com",
"apiAccessToken": ""
},
"Viewport": {
"experiences": [
{
"arcMinuteWidth": 246,
"arcMinuteHeight": 144,
"canRotate": false,
"canResize": false
}
],
"shape": "RECTANGLE",
"pixelWidth": 1024,
"pixelHeight": 600,
"dpi": 160,
"currentPixelWidth": 1024,
"currentPixelHeight": 600,
"touch": [
"SINGLE"
],
"video": {
"codecs": [
"H_264_42",
"H_264_41"
]
}
},
"Viewports": [
{
"type": "APL",
"id": "main",
"shape": "RECTANGLE",
"dpi": 160,
"presentationType": "STANDARD",
"canRotate": false,
"configuration": {
"current": {
"video": {
"codecs": [
"H_264_42",
"H_264_41"
]
},
"size": {
"type": "DISCRETE",
"pixelWidth": 1024,
"pixelHeight": 600
}
}
}
}
]
},
"request": {
"type": "SessionEndedRequest",
"requestId": "amzn1.echo-api.request.24b64895-3f90-4a5b-9805-9d3b038cd323",
"timestamp": "2020-03-29T08:59:54Z",
"locale": "en-US",
"reason": "ERROR",
"error": {
"type": "INVALID_RESPONSE",
"message": "An exception occurred while dispatching the request to the skill."
}
}
}
I have removed the user Id, device ID and access token while asking the question for security reasons.
My Lambda node js function looks like this which i have generated using the code generator :
https://github.com/shreyneil/Episodes/blob/master/amazon-echo/lambda-function.js
Url for code-generator: http://alexa.codegenerator.s3-website-us-east-1.amazonaws.com/
Url for tutorial that i was using to implement it: https://www.youtube.com/watch?v=BB3wwxgqPOU
Whenever I try to launch the skill using "open toit brewpub" in my test simulator, it throws an error stating:
There was a problem with the requested skill's response
Any idea on how to make this work?
Any leads would be appreciated. Thank you!

Dialogflow textToSpeech fulfilment not reading aloud the text

I am providing users with a response on an audio only device (e.g. google home), when I respond with a textToSpeech field within a simpleResponse, the speech is not read out in the simulator.
Has anyone experienced this and know how to fix?
I've tried different response types but none of them read out the textToSpeech field.
Also tried ticking/unticking end conversation toggle in Dialogflow and expectUserInput true/false when responding with JSON to no avail.
The response is currently fulfilled by a webhook which responds with JSON v2 fulfilment blob and the simulator receives the response with no errors but does not read it out.
RESPONSE -
{
"payload": {
"google": {
"expectUserResponse": true,
"richResponse": {
"items": [
{
"simpleResponse": {
"textToSpeech": "Here are the 3 closest restaurants that match your criteria,"
}
}
]
}
}
}
}
REQUEST -
{
"responseId": "404f3b65-73a5-47db-9c17-0fc8b31560a5",
"queryResult": {
"queryText": "actions_intent_NEW_SURFACE",
"parameters": {},
"allRequiredParamsPresent": true,
"outputContexts": [
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/findrestaurantswithcuisineandlocation-followup",
"lifespanCount": 98,
"parameters": {
"location.original": "Shoreditch",
"cuisine.original": "international",
"cuisine": "International",
"location": {
"subadmin-area": "Shoreditch",
"subadmin-area.original": "Shoreditch",
"subadmin-area.object": {}
}
}
},
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_account_linking"
},
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_audio_output"
},
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/google_assistant_input_type_voice"
},
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_media_response_audio"
},
{
"name": "projects/my-project/agent/sessions/sessionId/contexts/actions_intent_new_surface",
"parameters": {
"text": "no",
"NEW_SURFACE": {
"#type": "type.googleapis.com/google.actions.v2.NewSurfaceValue",
"status": "CANCELLED"
}
}
}
],
"intent": {
"name": "projects/my-project/agent/intents/0baefc9d-689c-4c33-b2b8-4e130f626de1",
"displayName": "Send restaurants to mobile"
},
"intentDetectionConfidence": 1,
"languageCode": "en-us"
},
"originalDetectIntentRequest": {
"source": "google",
"version": "2",
"payload": {
"isInSandbox": true,
"surface": {
"capabilities": [
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.MEDIA_RESPONSE_AUDIO"
},
{
"name": "actions.capability.ACCOUNT_LINKING"
}
]
},
"requestType": "SIMULATOR",
"inputs": [
{
"rawInputs": [
{
"query": "no",
"inputType": "VOICE"
}
],
"arguments": [
{
"extension": {
"#type": "type.googleapis.com/google.actions.v2.NewSurfaceValue",
"status": "CANCELLED"
},
"name": "NEW_SURFACE"
},
{
"rawText": "no",
"textValue": "no",
"name": "text"
}
],
"intent": "actions.intent.NEW_SURFACE"
}
],
"user": {
"userStorage": "{\"data\":{}}",
"lastSeen": "2019-04-12T14:31:23Z",
"locale": "en-US",
"userId": "userID"
},
"conversation": {
"conversationId": "sessionId",
"type": "ACTIVE",
"conversationToken": "[\"defaultwelcomeintent-followup\",\"findrestaurantswithcuisineandlocation-followup\",\"findrestaurantswithcuisineandlocation-followup-2\"]"
},
"availableSurfaces": [
{
"capabilities": [
{
"name": "actions.capability.AUDIO_OUTPUT"
},
{
"name": "actions.capability.SCREEN_OUTPUT"
},
{
"name": "actions.capability.WEB_BROWSER"
}
]
}
]
}
},
"session": "projects/my-project/agent/sessions/sessionId"
}
I expect the simulator to read out the result of textToSpeech but currently does not.

How to get custom intent slot values using handlerInput in ASK-SDK v2

I'm creating a basic calculator skill using ASK-SDK v2. I'm not sure how to get the slot values provided by the user into the Lambda code with the new version. I was able to make it work with the older version.
Conversation
User: Open calculate
Alexa: You can ask me to add, subtract, multiply and divide
User: Add two and three
Alexa: Sum of 2 and 3 is 5
Below is my IntentSchema
{
"interactionModel": {
"languageModel": {
"invocationName": "calculate",
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "AddIntent",
"slots": [
{
"name": "numA",
"type": "AMAZON.NUMBER"
},
{
"name": "numB",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"Sum of {numA} and {numB}",
"add {numA} and {numB}"
]
},
{
"name": "SubIntent",
"slots": [
{
"name": "numA",
"type": "AMAZON.NUMBER"
},
{
"name": "numB",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"difference between {numA} and {numB}",
"subtract {numA} from {numB}"
]
},
{
"name": "ProductIntent",
"slots": [
{
"name": "numA",
"type": "AMAZON.NUMBER"
},
{
"name": "numB",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"multiply {numA} and {numB}",
"product of {numA} and {numB}"
]
},
{
"name": "DivideIntent",
"slots": [
{
"name": "numA",
"type": "AMAZON.NUMBER"
},
{
"name": "numB",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"divide {numB} by {numA}",
"divide {numA} by {numB}"
]
},
{
"name": "ExponentialIntent",
"slots": [
{
"name": "numA",
"type": "AMAZON.NUMBER"
},
{
"name": "numB",
"type": "AMAZON.NUMBER"
},
{
"name": "numC",
"type": "AMAZON.NUMBER"
}
],
"samples": [
"{numA} raised to the power of {numB} by {numC}",
"{numA} raised to the power {numB}"
]
},
{
"name": "AMAZON.NavigateHomeIntent",
"samples": []
}
],
"types": []
}
}
}
I'm adding the AddIntent handler here. Please tell me if the approach I'm using to get the slot values from the intent is correct, or if I should use session attributes instead.
/**
 * Handler for AddIntent: speaks the sum of the two AMAZON.NUMBER slots.
 *
 * Fixes:
 *  - `resuestEnvelope` typo -> `requestEnvelope` (the original threw a
 *    TypeError, producing "Unable to process requested skill response").
 *  - Slot values arrive as strings, so `num1 + num2` concatenated
 *    ('2' + '3' === '23'); values are now converted with Number().
 *  - Truthiness check rejected a legitimate 0; Number.isFinite() is
 *    used instead (NaN from a missing/invalid slot still fails it).
 */
const AddIntentHandler = {
  canHandle(handlerInput) {
    return handlerInput.requestEnvelope.request.type === 'IntentRequest'
      && handlerInput.requestEnvelope.request.intent.name === 'AddIntent';
  },
  handle(handlerInput) {
    const slots = handlerInput.requestEnvelope.request.intent.slots;
    const num1 = Number(slots.numA && slots.numA.value);
    const num2 = Number(slots.numB && slots.numB.value);
    let output1;
    if (Number.isFinite(num1) && Number.isFinite(num2)) {
      output1 = 'The sum of ' + num1 + ' and ' + num2 + ' is ' + (num1 + num2);
    } else {
      output1 = 'Enter valid number';
    }
    const speechText = output1;
    return handlerInput.responseBuilder
      .speak(speechText)
      .reprompt(speechText)
      .getResponse();
  }
};
Alexa responds with "Unable to process requested skill response"
Any help is welcome
Update: there are now built-in functions in the SDK for this:
Alexa.getSlotValue() (returns the string value) and getSlot() (returns Slot object)
Alexa.getSlotValue(handlerInput.requestEnvelope, "someSlotName")
Old answer:
You have a typo, resuestEnvelope should be requestEnvelope. In any case I have created exactly the same skill, a calculator (in Spanish but it's basically the same thing) and I use a helper function called getSlotValues() which I encourage you to reuse. It will also work great when you have to capture custom slots (which are processed differently because the entity resolution structure is different):
https://github.com/germanviscuso/skill-sample-nodejs-mycalculator

Why does Alexa SDK throw an error when migrating from Dialogflow

I'm trying to migrate my action form Dialogflow, and the most important thing is the intent schema. But after uploading the .json file, the error Intent name must not be empty. Error code: MissingIntentName is thrown. Here is Intent schema.json
{
"intents": [
{
"intent": "SelectedSubjectsYes"
},
{
"intent": "UserIsOk",
"slots": [
{
"name": "okslot",
"type": "OK"
}
]
},
{
"intent": "SelectedSubjectsNo"
},
{
"intent": "UserIsNotOk",
"slots": [
{
"name": "not_okslot",
"type": "NOT_OK"
}
]
},
{
"intent": "DefaultWelcomeIntent"
},
{
"intent": "HowAreYou?"
},
{
"intent": "SelectedSubjects",
"slots": [
{
"name": "subjectslot",
"type": "SUBJECT"
}
]
}
]
}
I've in no way edited it, so why the error? Thanks in advance.
The JSON structure for the interaction model is slightly different. This is how it should look now.
{
"interactionModel": {
"languageModel": {
"invocationName": "Your invocation name",
"intents": [
{
"name": "SelectedSubjectsYes",
"slots": [],
"samples": [
"provide sample for SelectedSubjectsYes intent",
"sample for SelectedSubjectsYes intent"
]
},
{
"name": "UserIsOk",
"slots": [
{
"name": "okslot",
"type": "OK"
}
],
"samples": [
"provide other samples for UserIsOk",
"I'm {okslot}",
"{okslot}"
]
},
{
"name": "SelectedSubjectsNo",
"slots": [],
"samples": [
"provide sample for SelectedSubjectsNo intent",
"sample for SelectedSubjectsNo intent"
]
},
{
"name": "UserIsNotOk",
"slots": [
{
"name": "not_okslot",
"type": "NOT_OK"
}
],
"samples": [
"provide other samples for UserIsNotOk",
"i'm {not_okslot}",
"{not_okslot}"
]
},
{
"name": "HowAreYou?",
"slots": [],
"samples": [
"provide sample for HowAreYou intent",
"sample for HowAreYou intent"
]
},
{
"name": "SelectedSubjects",
"slots": [
{
"name": "subjectslot",
"type": "SUBJECT"
}
],
"samples": [
"provide other samples for SelectedSubjects",
"i choose {subjectslot}"
]
}
],
"types": [
{
"name": "OK",
"values": [
{
"name": {
"value": "ok"
}
},
{
"name": {
"value": "yes"
}
}
]
},
{
"name": "NOT_OK",
"values": [
{
"name": {
"value": "not ok"
}
},
{
"name": {
"value": "nope"
}
}
]
},
{
"name": "SUBJECT",
"values": [
{
"name": {
"value": "Physics"
}
},
{
"name": {
"value": "Biology"
}
}
]
}
]
}
}
}
Rather than converting from Dialog flow, it's pretty easy to design one in Alexa skill builder. Also, it is recommended to use predefined AMAZON.YesIntent and AMAZON.NoIntent for "yes" or "no" utterances.

Resources