nealrs opened this issue 6 years ago
Additionally, it looks like the `data` key in `outputContexts` returns `undefined` on follow-up conversation turns. As in:

- `utter('talk to $foo')` launches the welcome intent as noted above & returns a full `outputContexts` object.
- `utter('next')` identifies the correct intent (`nextStory`), but throws the fallback error / returns an empty `outputContexts` response.

**mocha test**
```js
const { assert } = require('chai'); // assert.exists is chai's assert API

// `invocationName` and `publisher_slug` are defined elsewhere in the suite;
// `this.google` is the virtual google assistant instance set up elsewhere.
describe.only('Next story', function() {
  it('Skill Launch => Next', async function() {
    let reply = await this.google.utter(`talk to ${invocationName}`).catch(err => console.info(err));
    console.info(`response: ${JSON.stringify(reply, null, 2)}`);

    let data = JSON.parse(reply.outputContexts[0].parameters.data);
    console.log(`data: ${JSON.stringify(data, null, 2)}`);

    assert.equal(data.nowPlayingIndex, 0, 'NOWPLAYINGINDEX != 0');
    assert.equal(data.streak, 0, 'STREAK != 0');
    assert.deepEqual(data.setlistParams.type, 'branded', 'INCORRECT TYPE SETLIST PARAM');
    assert.deepEqual(data.setlistParams.platform, 'google', 'INCORRECT PLATFORM SETLIST PARAM');
    assert.deepEqual(data.setlistParams.publisher, publisher_slug, 'INCORRECT PUBLISHER SETLIST PARAM');
    assert.exists(data.setlist.stories, 'DOES NOT INCLUDE SETLIST STORY INFO');
    assert.exists(data.setlist.publisher, 'DOES NOT INCLUDE SETLIST PUBLISHER INFO');
    assert.deepEqual(reply.payload.google.richResponse.suggestions[0].title, 'More stories', 'INCORRECT SUGGESTION CHIP');
    assert.deepEqual(reply.payload.google.richResponse.suggestions[1].title, 'I\'m done', 'INCORRECT SUGGESTION CHIP');
    assert.exists(reply.payload.google.richResponse.items[0].simpleResponse, 'DOES NOT INCLUDE SIMPLE TEXT & AUDIO RESPONSE');
    // INCLUDE CHECKS FOR RICH MEDIA HERE -- once it's included in the virtual google assistant package

    reply = await this.google.utter('next').catch(err => console.info(err));
    console.info(`response: ${JSON.stringify(reply, null, 2)}`);

    data = JSON.parse(reply.outputContexts[0].parameters.data);
    console.log(`data: ${JSON.stringify(data, null, 2)}`);

    assert.equal(data.nowPlayingIndex, 1, 'NOWPLAYINGINDEX != 1');
    assert.equal(data.streak, 0, 'STREAK != 0');
    assert.deepEqual(reply.payload.google.richResponse.suggestions[0].title, 'More stories', 'INCORRECT SUGGESTION CHIP');
    assert.deepEqual(reply.payload.google.richResponse.suggestions[1].title, 'I\'m done', 'INCORRECT SUGGESTION CHIP');
    assert.exists(reply.payload.google.richResponse.items[0].simpleResponse, 'DOES NOT INCLUDE SIMPLE TEXT & AUDIO RESPONSE');
    // INCLUDE CHECKS FOR RICH MEDIA HERE -- once it's included in the virtual google assistant package
    // CHECK FOR MORE EPISODE SPECIFICS? WHATEVER WE FIND IN MEDIA RESPONSE
  });
}); // close describe (missing in the original paste)
```
**conv object output for `utter('next')`**

```json
{
"responses": [],
"expectUserResponse": true,
"digested": false,
"_responded": false,
"request": {
"user": {}
},
"headers": {
"content-length": "559",
"content-type": "application/json",
"host": "127.0.0.1:3333",
"connection": "close"
},
"sandbox": false,
"input": {},
"surface": {
"capabilities": {
"list": []
}
},
"available": {
"surfaces": {
"list": [],
"capabilities": {
"surfaces": []
}
}
},
"user": {
"storage": {},
"permissions": [],
"last": {},
"name": {},
"entitlements": [],
"access": {}
},
"arguments": {
"parsed": {
"input": {},
"list": []
},
"status": {
"input": {},
"list": []
},
"raw": {
"list": [],
"input": {}
}
},
"device": {},
"body": {
"originalDetectIntentRequest": {
"source": "google",
"version": "2",
"payload": {
"user": {}
}
},
"responseId": "02d69b24-3684-464a-a249-90484a39621d",
"queryResult": {
"queryText": "GOOGLE_ASSISTANT_WELCOME",
"allRequiredParamsCollected": true,
"parameters": {},
"outputContexts": [],
"intent": {
"name": "7bf4f8c9-5eec-40fd-ac72-78446b376972",
"webhookState": "true",
"displayName": "nextStory"
},
"diagnosticInfo": {
"webhookLatencySeconds": 1
},
"fulfillmentText": "",
"fulfillmentMessages": [
{
"text": {
"text": []
}
}
],
"intentDetectionConfidence": 1,
"languageCode": "en-us"
},
"session": "1518537462114"
},
"version": 2,
"action": "",
"intent": "nextStory",
"parameters": {},
"contexts": {
"_session": "1518537462114",
"input": {},
"output": {}
},
"incoming": {
"parsed": []
},
"query": "GOOGLE_ASSISTANT_WELCOME",
"data": {}
}
```
**virtual assistant response for `utter('next')`** (this should return additional data)

```json
{
"payload": {
"google": {
"expectUserResponse": false,
"richResponse": {
"items": [
{
"simpleResponse": {
"textToSpeech": "Sorry, I can't help you with that."
}
}
]
},
"userStorage": "{\"data\":{}}"
}
},
"outputContexts": [
{
"name": "1518537462114/contexts/_actions_on_google",
"lifespanCount": 99,
"parameters": {
"data": "{}"
}
}
]
}
```

data: `{}`
**test output**

```text
1) BRANDED Google Action
     Next story
       Skill Launch => Next:

     AssertionError: NOWPLAYINGINDEX != 1: expected undefined to equal 1
```
Perhaps I'm missing something here / doing it wrong? (again, I'm using media responses here)
Hi @nealrs, glad to hear that you are using the latest version. About the responses: we just grab the response generated by your endpoint and return it in the interaction, but it seems that for media responses, Dialogflow itself may be adding some extra information in there. We are investigating that.

About the context: we don't yet support maintaining context; that is the next feature we are adding. For the moment, to carry context between requests you can add it to the next request by modifying the payload like this:
```js
// Capture the contexts returned by the previous interaction
// (`result` is the reply from a prior utter/intend call).
const context = result.outputContexts;

// Inject them into the next outgoing request.
googleAssistant.addFilter(request => {
    request.result.outputContexts = context;
});
```
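Wired into your test above, it would look roughly like this (a sketch, not verified against your suite: it assumes `this.google` is the same instance that exposes `addFilter`, and that the filter runs before the follow-up request is sent):

```js
// Sketch: carrying outputContexts across turns by hand.
const first = await this.google.utter(`talk to ${invocationName}`);

// Capture the contexts your endpoint returned on the first turn...
const context = first.outputContexts;

// ...and inject them into the next outgoing request.
// (Depending on the request shape, the field may instead be
// request.queryResult.outputContexts in a Dialogflow v2 payload.)
this.google.addFilter(request => {
    request.result.outputContexts = context;
});

const second = await this.google.utter('next');
```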
Thanks @jperata - I did get the context passing to work after a little hackery.

But I'm a little confused: while my code works in production (Neal News is live on Google Assistant), the unit tests recognize my input (`more stories` / `next`) and identify the intended intent (`nextStory`), yet they still get the default fallback response (`Sorry, I can't help you with that.`) instead of the proper response (another media object / audio player).

I know some reserved phrases (previous / pause / resume / stop / restart / cancel / exit) are processed at the device level and are treated differently from text input (they often don't fire any log output / webhook calls), but I'm eager to investigate what's causing this behavior.
It also seems like even though Dialogflow identifies the correct intent, the `conv` object's `query` & `queryText` are always equal to `GOOGLE_ASSISTANT_WELCOME`. Even when I trigger my `exit` intent with `.intend('exit')`, `query` & `queryText` are `GOOGLE_ASSISTANT_WELCOME`.
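For reference, here's roughly what I see on the fulfillment side (a sketch using the actions-on-google v2 client; the `exit` intent name matches my Dialogflow agent):

```js
const { dialogflow } = require('actions-on-google');
const app = dialogflow();

// Even for a directly triggered intent, the query text the webhook
// receives during these tests is still GOOGLE_ASSISTANT_WELCOME.
app.intent('exit', conv => {
    console.log(conv.query);                      // 'GOOGLE_ASSISTANT_WELCOME'
    console.log(conv.body.queryResult.queryText); // 'GOOGLE_ASSISTANT_WELCOME'
    conv.close('Goodbye!');
});
```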
Tried with the `0.2.0` release on npm; same results.
Using the new `0.0.7` release on npm, I am getting the following output from `utter('talk to SKILL_NAME')`:

It's awesome that the `outputContexts` portion is included, so I can check on my user data storage, but this is what the Actions simulator gives me:

As you can see, some of the fields are missing or nested differently. I know the `mediaObject` isn't supported by your package quite yet, but this missing metadata / token / debug info is important.
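Once media responses are surfaced in the reply, this is the kind of check I'd like to add for the rich media spots stubbed out in the test above (hypothetical: the field names follow the Actions on Google MediaResponse shape, and the item index assumes the media object follows the simpleResponse):

```js
// Hypothetical assertion for the "INCLUDE CHECKS FOR RICH MEDIA HERE" stubs.
// MediaResponse shape per Actions on Google v2: items[].mediaResponse.mediaObjects[].
const media = reply.payload.google.richResponse.items[1].mediaResponse;
assert.exists(media, 'DOES NOT INCLUDE MEDIA RESPONSE');
assert.exists(media.mediaObjects[0].contentUrl, 'DOES NOT INCLUDE MEDIA CONTENT URL');
```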