Why does the Alexa SDK throw an error when migrating from Dialogflow?

I'm trying to migrate my action from Dialogflow, and the most important thing is the intent schema. But after uploading the .json file, the error "Intent name must not be empty. Error code: MissingIntentName" is thrown. Here is my Intent schema.json:
{
  "intents": [
    {
      "intent": "SelectedSubjectsYes"
    },
    {
      "intent": "UserIsOk",
      "slots": [
        {
          "name": "okslot",
          "type": "OK"
        }
      ]
    },
    {
      "intent": "SelectedSubjectsNo"
    },
    {
      "intent": "UserIsNotOk",
      "slots": [
        {
          "name": "not_okslot",
          "type": "NOT_OK"
        }
      ]
    },
    {
      "intent": "DefaultWelcomeIntent"
    },
    {
      "intent": "HowAreYou?"
    },
    {
      "intent": "SelectedSubjects",
      "slots": [
        {
          "name": "subjectslot",
          "type": "SUBJECT"
        }
      ]
    }
  ]
}
I haven't edited it in any way, so why the error? Thanks in advance.

The JSON structure for the Alexa interaction model is slightly different. This is how it should look now:
{
  "interactionModel": {
    "languageModel": {
      "invocationName": "Your invocation name",
      "intents": [
        {
          "name": "SelectedSubjectsYes",
          "slots": [],
          "samples": [
            "provide sample for SelectedSubjectsYes intent",
            "sample for SelectedSubjectsYes intent"
          ]
        },
        {
          "name": "UserIsOk",
          "slots": [
            {
              "name": "okslot",
              "type": "OK"
            }
          ],
          "samples": [
            "provide other samples for UserIsOk",
            "I'm {okslot}",
            "{okslot}"
          ]
        },
        {
          "name": "SelectedSubjectsNo",
          "slots": [],
          "samples": [
            "provide sample for SelectedSubjectsNo intent",
            "sample for SelectedSubjectsNo intent"
          ]
        },
        {
          "name": "UserIsNotOk",
          "slots": [
            {
              "name": "not_okslot",
              "type": "NOT_OK"
            }
          ],
          "samples": [
            "provide other samples for UserIsNotOk",
            "i'm {not_okslot}",
            "{not_okslot}"
          ]
        },
        {
          "name": "HowAreYou",
          "slots": [],
          "samples": [
            "provide sample for HowAreYou intent",
            "sample for HowAreYou intent"
          ]
        },
        {
          "name": "SelectedSubjects",
          "slots": [
            {
              "name": "subjectslot",
              "type": "SUBJECT"
            }
          ],
          "samples": [
            "provide other samples for SelectedSubjects",
            "i choose {subjectslot}"
          ]
        }
      ],
      "types": [
        {
          "name": "OK",
          "values": [
            {
              "name": {
                "value": "ok"
              }
            },
            {
              "name": {
                "value": "yes"
              }
            }
          ]
        },
        {
          "name": "NOT_OK",
          "values": [
            {
              "name": {
                "value": "not ok"
              }
            },
            {
              "name": {
                "value": "nope"
              }
            }
          ]
        },
        {
          "name": "SUBJECT",
          "values": [
            {
              "name": {
                "value": "Physics"
              }
            },
            {
              "name": {
                "value": "Biology"
              }
            }
          ]
        }
      ]
    }
  }
}
Note that intent names may only contain alphabetic characters and underscores, so "HowAreYou?" from the Dialogflow export has to become "HowAreYou". Rather than converting from Dialogflow, it's pretty easy to design one in the Alexa skill builder. Also, it is recommended to use the predefined AMAZON.YesIntent and AMAZON.NoIntent for "yes" or "no" utterances, as shown below.
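For example, the two yes/no intents above could be replaced with the built-ins. A minimal sketch of just the affected part of the intents array (the rest of the languageModel stays unchanged):
"intents": [
  {
    "name": "AMAZON.YesIntent",
    "samples": []
  },
  {
    "name": "AMAZON.NoIntent",
    "samples": []
  }
]
Built-in intents ship with their own training utterances, so their samples arrays can stay empty.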

Related

Facing an error while building a custom skill for Amazon Alexa

I am trying to build a basic custom Alexa skill. I have created an intent schema and am using an AWS Lambda function as the endpoint.
My Intent schema:
{
  "interactionModel": {
    "languageModel": {
      "invocationName": "toit brewpub",
      "modelConfiguration": {
        "fallbackIntentSensitivity": {
          "level": "LOW"
        }
      },
      "intents": [
        {
          "name": "AMAZON.FallbackIntent",
          "samples": []
        },
        {
          "name": "AMAZON.CancelIntent",
          "samples": []
        },
        {
          "name": "AMAZON.HelpIntent",
          "samples": []
        },
        {
          "name": "AMAZON.StopIntent",
          "samples": []
        },
        {
          "name": "AMAZON.NavigateHomeIntent",
          "samples": []
        },
        {
          "name": "GetClosingTime",
          "slots": [],
          "samples": [
            "what time do you close",
            "when is the last order",
            "till what time are you open",
            "What time does the pub close"
          ]
        },
        {
          "name": "GetPriceOfBeer",
          "slots": [
            {
              "name": "beer",
              "type": "BEERS"
            }
          ],
          "samples": [
            "how much is {beer}",
            "what is the price of {beer}"
          ]
        }
      ],
      "types": [
        {
          "name": "BEERS",
          "values": [
            {
              "name": {
                "value": "Toit Red"
              }
            },
            {
              "name": {
                "value": "Tiot Weiss"
              }
            },
            {
              "name": {
                "value": "Basmati Blonde"
              }
            },
            {
              "name": {
                "value": "Tintin Toit"
              }
            },
            {
              "name": {
                "value": "IPA"
              }
            },
            {
              "name": {
                "value": "Dark Knight"
              }
            }
          ]
        }
      ]
    }
  }
}
I am using Node.js v10.x for my Lambda function, which has been built using the Alexa-Skills-NodeJS-Fact-kit. The region for my AWS Lambda is us-east-1 (N. Virginia).
Below is the request I receive when I talk to my Test Simulator:
{
  "version": "1.0",
  "session": {
    "new": false,
    "sessionId": "amzn1.echo-api.session.fd1c5315-ecf8-413f-ba25-e54bd6ae316a",
    "application": {
      "applicationId": "amzn1.ask.skill.72615503-5f38-4baf-b0dd-cd6edd3b6dfd"
    },
    "user": {
      "userId": ""
    }
  },
  "context": {
    "System": {
      "application": {
        "applicationId": "amzn1.ask.skill.72615503-5f38-4baf-b0dd-cd6edd3b6dfd"
      },
      "user": {
        "userId": ""
      },
      "device": {
        "deviceId": "",
        "supportedInterfaces": {}
      },
      "apiEndpoint": "https://api.eu.amazonalexa.com",
      "apiAccessToken": ""
    },
    "Viewport": {
      "experiences": [
        {
          "arcMinuteWidth": 246,
          "arcMinuteHeight": 144,
          "canRotate": false,
          "canResize": false
        }
      ],
      "shape": "RECTANGLE",
      "pixelWidth": 1024,
      "pixelHeight": 600,
      "dpi": 160,
      "currentPixelWidth": 1024,
      "currentPixelHeight": 600,
      "touch": [
        "SINGLE"
      ],
      "video": {
        "codecs": [
          "H_264_42",
          "H_264_41"
        ]
      }
    },
    "Viewports": [
      {
        "type": "APL",
        "id": "main",
        "shape": "RECTANGLE",
        "dpi": 160,
        "presentationType": "STANDARD",
        "canRotate": false,
        "configuration": {
          "current": {
            "video": {
              "codecs": [
                "H_264_42",
                "H_264_41"
              ]
            },
            "size": {
              "type": "DISCRETE",
              "pixelWidth": 1024,
              "pixelHeight": 600
            }
          }
        }
      }
    ]
  },
  "request": {
    "type": "SessionEndedRequest",
    "requestId": "amzn1.echo-api.request.24b64895-3f90-4a5b-9805-9d3b038cd323",
    "timestamp": "2020-03-29T08:59:54Z",
    "locale": "en-US",
    "reason": "ERROR",
    "error": {
      "type": "INVALID_RESPONSE",
      "message": "An exception occurred while dispatching the request to the skill."
    }
  }
}
I have removed the user ID, device ID, and access token for security reasons.
My Lambda Node.js function, which I generated using the code generator, looks like this:
https://github.com/shreyneil/Episodes/blob/master/amazon-echo/lambda-function.js
URL for the code generator: http://alexa.codegenerator.s3-website-us-east-1.amazonaws.com/
URL for the tutorial I was using to implement it: https://www.youtube.com/watch?v=BB3wwxgqPOU
Whenever I try to launch the skill using "open toit brewpub" in my test simulator, it throws an error stating:
There was a problem with the requested skill's response
Any idea on how to make this work?
Any leads would be appreciated, thank you!
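For context, the "reason": "ERROR" block with type INVALID_RESPONSE in the request above suggests Alexa could not use whatever the Lambda returned. A well-formed skill response body looks roughly like this (a minimal sketch for comparison, not the actual output of the linked fact-skill code):
{
  "version": "1.0",
  "response": {
    "outputSpeech": {
      "type": "PlainText",
      "text": "Welcome to toit brewpub!"
    },
    "shouldEndSession": false
  }
}
If the Lambda throws before producing a body of this shape, the simulator reports "There was a problem with the requested skill's response".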

Dialogflow textToSpeech fulfilment not reading aloud the text

I am providing users with a response on an audio-only device (e.g. Google Home). When I respond with a textToSpeech field within a simpleResponse, the speech is not read out in the simulator.
Has anyone experienced this and know how to fix it?
I've tried different response types, but none of them read out the textToSpeech field.
I also tried ticking/unticking the end conversation toggle in Dialogflow and setting expectUserInput true/false when responding with JSON, to no avail.
The response is currently fulfilled by a webhook which responds with a JSON v2 fulfilment blob, and the simulator receives the response with no errors but does not read it out.
RESPONSE -
{
  "payload": {
    "google": {
      "expectUserResponse": true,
      "richResponse": {
        "items": [
          {
            "simpleResponse": {
              "textToSpeech": "Here are the 3 closest restaurants that match your criteria,"
            }
          }
        ]
      }
    }
  }
}
REQUEST -
{
  "responseId": "404f3b65-73a5-47db-9c17-0fc8b31560a5",
  "queryResult": {
    "queryText": "actions_intent_NEW_SURFACE",
    "parameters": {},
    "allRequiredParamsPresent": true,
    "outputContexts": [
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/findrestaurantswithcuisineandlocation-followup",
        "lifespanCount": 98,
        "parameters": {
          "location.original": "Shoreditch",
          "cuisine.original": "international",
          "cuisine": "International",
          "location": {
            "subadmin-area": "Shoreditch",
            "subadmin-area.original": "Shoreditch",
            "subadmin-area.object": {}
          }
        }
      },
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_account_linking"
      },
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_audio_output"
      },
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/google_assistant_input_type_voice"
      },
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/actions_capability_media_response_audio"
      },
      {
        "name": "projects/my-project/agent/sessions/sessionId/contexts/actions_intent_new_surface",
        "parameters": {
          "text": "no",
          "NEW_SURFACE": {
            "#type": "type.googleapis.com/google.actions.v2.NewSurfaceValue",
            "status": "CANCELLED"
          }
        }
      }
    ],
    "intent": {
      "name": "projects/my-project/agent/intents/0baefc9d-689c-4c33-b2b8-4e130f626de1",
      "displayName": "Send restaurants to mobile"
    },
    "intentDetectionConfidence": 1,
    "languageCode": "en-us"
  },
  "originalDetectIntentRequest": {
    "source": "google",
    "version": "2",
    "payload": {
      "isInSandbox": true,
      "surface": {
        "capabilities": [
          {
            "name": "actions.capability.AUDIO_OUTPUT"
          },
          {
            "name": "actions.capability.MEDIA_RESPONSE_AUDIO"
          },
          {
            "name": "actions.capability.ACCOUNT_LINKING"
          }
        ]
      },
      "requestType": "SIMULATOR",
      "inputs": [
        {
          "rawInputs": [
            {
              "query": "no",
              "inputType": "VOICE"
            }
          ],
          "arguments": [
            {
              "extension": {
                "#type": "type.googleapis.com/google.actions.v2.NewSurfaceValue",
                "status": "CANCELLED"
              },
              "name": "NEW_SURFACE"
            },
            {
              "rawText": "no",
              "textValue": "no",
              "name": "text"
            }
          ],
          "intent": "actions.intent.NEW_SURFACE"
        }
      ],
      "user": {
        "userStorage": "{\"data\":{}}",
        "lastSeen": "2019-04-12T14:31:23Z",
        "locale": "en-US",
        "userId": "userID"
      },
      "conversation": {
        "conversationId": "sessionId",
        "type": "ACTIVE",
        "conversationToken": "[\"defaultwelcomeintent-followup\",\"findrestaurantswithcuisineandlocation-followup\",\"findrestaurantswithcuisineandlocation-followup-2\"]"
      },
      "availableSurfaces": [
        {
          "capabilities": [
            {
              "name": "actions.capability.AUDIO_OUTPUT"
            },
            {
              "name": "actions.capability.SCREEN_OUTPUT"
            },
            {
              "name": "actions.capability.WEB_BROWSER"
            }
          ]
        }
      ]
    }
  },
  "session": "projects/my-project/agent/sessions/sessionId"
}
I expect the simulator to read out the result of textToSpeech, but it currently does not.
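One diagnostic worth trying (an assumption on my part, not a confirmed fix): give the simpleResponse an explicit displayText alongside textToSpeech, so the simulator's display output at least confirms the response is arriving intact even when the audio is not played:
{
  "simpleResponse": {
    "textToSpeech": "Here are the 3 closest restaurants that match your criteria,",
    "displayText": "Here are the 3 closest restaurants that match your criteria."
  }
}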

Can someone explain this behavior of the interaction model of an Alexa skill?

This is the interaction model I am using:
{
  "interactionModel": {
    "languageModel": {
      "invocationName": "greeter",
      "intents": [
        {
          "name": "HelloWorldIntent",
          "slots": [
            {
              "name": "phrase",
              "type": "phrase"
            }
          ],
          "samples": [
            "{phrase}"
          ]
        }
      ],
      "types": [
        {
          "name": "phrase",
          "values": [
            {
              "name": {
                "value": "HelloWorldIntent asdf {phrase}"
              }
            }
          ]
        }
      ]
    }
  }
}
Notice the value of the phrase slot type. When I set it up this way, whatever I say to Alexa, the whole raw query gets populated in the value field of the phrase slot in the request object.
Ex:
"Launch greeter" ==> LaunchIntent (obvious)
"No matter what I say it triggers Hello world intent" ==>
"intent": {
"name": "HelloWorldIntent",
"confirmationStatus": "NONE",
"slots": {
"phrase": {
"name": "phrase",
"value": "no matter what I say it triggers hello world intent",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": "amzn1.er-authority.echo-sdk.amzn1.ask.skill.6c1d0991-f895-45fa-ba37-6880d3cc95f1.phrase",
"status": {
"code": "ER_SUCCESS_NO_MATCH"
}
}
]
},
"confirmationStatus": "NONE"
}
}
}
I am not able to figure out how this interaction model is giving me the raw query in the "value" field of the phrase slot.
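For what it's worth, the raw utterance stays readable in a handler even though entity resolution reports ER_SUCCESS_NO_MATCH, because custom slot type values act as training samples rather than a strict enumeration. A minimal sketch using ask-sdk-core for Node.js (the handler wiring is illustrative, assuming the "greeter" model above):
const Alexa = require('ask-sdk-core');

const HelloWorldIntentHandler = {
  canHandle(handlerInput) {
    return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
      && Alexa.getIntentName(handlerInput.requestEnvelope) === 'HelloWorldIntent';
  },
  handle(handlerInput) {
    // The full raw query lands in the slot value even when resolution found no match.
    const rawQuery = Alexa.getSlotValue(handlerInput.requestEnvelope, 'phrase');
    return handlerInput.responseBuilder
      .speak(`You said: ${rawQuery}`)
      .getResponse();
  },
};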

ALEXA - How to send slot ID in service request

I am creating a custom Alexa skill with custom slots. I have created predefined values for the slots and assigned an ID to each. During my tests I can see that there is no ID key-value pair in the service request:
"request": {
"type": "IntentRequest",
"requestId": "EdwRequestId.xXxxxxXXXx-xxXX-xXXx-xXXX-xxxXXXXXXxxx",
"intent": {
"name": "HowToIntent",
"slots": {
"action": {
"name": "action",
"value": "clear cache"
}
}
},
Is there any possibility to pass the slot ID in the request? Here is my language model:
"languageModel": {
"types": [
{
"name": "action",
"values": [
{
"id": "1",
"name": {
"value": "clear cache",
"synonyms": [
"flush cache",
"clean cache"
]
}
},
{
"id": "2",
"name": {
"value": "perform reindex",
"synonyms": [
"reindex",
"do reindex"
]
}
},
{
"id": "3",
"name": {
"value": "create a product",
"synonyms": [
"add product",
"make product"
]
}
},
{
"id": "4",
"name": {
"value": "create a category",
"synonyms": [
"add category",
"make category"
]
}
}
]
},
{
"name": "element",
"values": [
{
"id": "1",
"name": {
"value": "category tree",
"synonyms": [
"category structure",
"categories"
]
}
},
{
"id": "2",
"name": {
"value": "simple product",
"synonyms": []
}
},
{
"id": "3",
"name": {
"value": "gift card",
"synonyms": []
}
}
]
}
],
"intents": [
{
"name": "AMAZON.CancelIntent",
"samples": []
},
{
"name": "AMAZON.HelpIntent",
"samples": []
},
{
"name": "AMAZON.StopIntent",
"samples": []
},
{
"name": "HowToIntent",
"samples": [
"how to {action}"
],
"slots": [
{
"name": "action",
"type": "action"
}
]
},
{
"name": "WelcomeIntent",
"samples": [],
"slots": []
},
{
"name": "WhatIsIntent",
"samples": [
"what is {element}"
],
"slots": [
{
"name": "element",
"type": "element"
}
]
}
],
"invocationName": "my assistant"
}
}
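For reference, entity resolution does deliver the assigned IDs, just not as a top-level key on the slot: when the spoken value matches a type entry, the ID arrives under resolutions inside the IntentRequest (assuming the skill is built with the current interactionModel schema). A hedged sketch of what the action slot would then look like, with the authority string abbreviated:
"slots": {
  "action": {
    "name": "action",
    "value": "clear cache",
    "resolutions": {
      "resolutionsPerAuthority": [
        {
          "authority": "amzn1.er-authority.echo-sdk.<skill-id>.action",
          "status": {
            "code": "ER_SUCCESS_MATCH"
          },
          "values": [
            {
              "value": {
                "name": "clear cache",
                "id": "1"
              }
            }
          ]
        }
      ]
    }
  }
}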

Amazon Alexa Entity Resolution

I am having trouble getting Alexa to understand any synonyms for the words I'm speaking to her. She will always return that she does not know the meaning of the synonym despite having added it as part of the intent schema:
{
  "languageModel": {
    "types": [
      {
        "name": "LIST_OF_DEFINITIONS",
        "values": [
          {
            "id": "USER_EXPERIENCE",
            "name": {
              "value": "user experience",
              "synonyms": [
                "ux"
              ]
            }
          }
        ]
      }
    ],
    "intents": [
      {
        "name": "AMAZON.CancelIntent",
        "samples": []
      },
      {
        "name": "AMAZON.HelpIntent",
        "samples": []
      },
      {
        "name": "AMAZON.RepeatIntent",
        "samples": []
      },
      {
        "name": "AMAZON.StopIntent",
        "samples": []
      },
      {
        "name": "RecipeIntent",
        "samples": [
          "what is a {Definition}"
        ],
        "slots": [
          {
            "name": "Definition",
            "type": "LIST_OF_DEFINITIONS"
          }
        ]
      }
    ],
    "invocationName": "digital dictionary"
  }
}
Am I missing something?
Your synonym "ux" isn't going to be understood, because the user won't pronounce it like a word; they will say two letters. So try adding "u x" and "U.X." as synonyms too. That should work.
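In the schema above, that would mean extending the synonyms list of the USER_EXPERIENCE value (a sketch of just the affected entry):
{
  "id": "USER_EXPERIENCE",
  "name": {
    "value": "user experience",
    "synonyms": [
      "ux",
      "u x",
      "U.X."
    ]
  }
}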
