I am not sure why I keep running into the following errors when trying to use Logic Apps to update a document in Cosmos DB:
Error 1: PartitionKey extracted from document doesn't match the one specified in the header
Error 2: One of the specified inputs is invalid
For error 1, I sent the following request via Logic Apps:
{
"method": "post",
"headers": {
"x-ms-documentdb-is-upsert": "True",
"x-ms-documentdb-raw-partitionkey": "12347"
},
"path": "/dbs/bc-gamification-management/colls/bcpoints/docs",
"host": {
"connection": {
"name": <omitted as this shouldn't matter>
}
},
"body": {
"curr_point": 500,
"id": "12347",
"overall_point": 1400
}
}
I'm not sure where I got this idea, but for error 2 I omitted the partition key from the request body:
{
"method": "post",
"headers": {
"x-ms-documentdb-is-upsert": "True",
"x-ms-documentdb-raw-partitionkey": "12347"
},
"path": "/dbs/bc-gamification-management/colls/bcpoints/docs",
"host": {
"connection": {
"name": <omitted as this shouldn't matter>
}
},
"body": {
"curr_point": 500,
"overall_point": 1400
}
}
I have tried troubleshooting this using https://learn.microsoft.com/en-us/azure/cosmos-db/sql/troubleshoot-bad-request and various other approaches, such as using "id" and "/id" as the partition key value instead of the actual value. None of these worked, and I am not sure why...
FYI, the CosmosDB has items with the following sample:
{
"id": "12347",
"overall_point": 1200,
"curr_point": 300,
"_rid": <omitted as this shouldn't matter>,
"_self": <omitted as this shouldn't matter>,
"_etag": <omitted as this shouldn't matter>,
"_attachments": <omitted as this shouldn't matter>,
"_ts": <omitted as this shouldn't matter>
}
The "id" field is also the Partition Key for the Collection. Please advice :")
What you need is something like this:
{
"method": "post",
"headers": {
"x-ms-documentdb-is-upsert": "True",
"x-ms-documentdb-raw-partitionkey": "\"12347\""
},
"path": "/dbs/testdb/colls/testcoll/docs",
"host": {
"connection": {
"name": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
}
},
"body": {
"curr_point": 500,
"id": "12347",
"overall_point": 1400
}
}
You need to put your partition key within quotes, as shown above: because the partition key here is a string, the x-ms-documentdb-raw-partitionkey header must carry the JSON representation of the value, i.e. "\"12347\"" rather than the bare 12347.
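If the key comes from dynamic content rather than a hard-coded value, here is a minimal sketch of the same header built in code view (assuming, purely as an illustration, that the id arrives via triggerBody(); substitute your actual expression):
"headers": {
    "x-ms-documentdb-is-upsert": "True",
    "x-ms-documentdb-raw-partitionkey": "\"@{triggerBody()?['id']}\""
}
The @{...} interpolation is evaluated first, so the header value ends up as the quoted JSON string the Cosmos DB connector expects.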
Below is a definition of two tasks: the first is HTTP and the second is HUMAN. In the HTTP task I need to pass the workflowInstanceId and the HUMAN task's taskId.
I am not able to find a way to get this workflow contextual data. Help is appreciated.
{
"name": "fullfill_order_for_medicine",
"taskReferenceName": "fullfill_order_for_medicine",
"inputParameters": {
"http_request": {
"uri": "http://localhost:7777/orders",
"method": "PUT",
"body": {
"id": "${get_order_request.output.response.body.id}",
"name": "${get_order_request.output.response.body.name}",
"status": "complete",
"isComplete": true,
"WorkflowId":"${workflowInstanceId}",
"TaskId":"${dispatch_approval.taskId}"
}
}
},
"type": "HTTP"
},
{
"name" : "dispatch_approval",
"taskReferenceName": "dispatch_approval",
"type": "HUMAN"
}
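A hedged pointer rather than a verified answer: Conductor documents system variables for exactly this kind of contextual data, and the workflow instance id is usually referenced as ${workflow.workflowId} rather than ${workflowInstanceId}. Treat both that and the ${dispatch_approval.taskId} reference as assumptions to check against your Conductor version. A sketch of the HTTP task body under those assumptions:
"body": {
    "id": "${get_order_request.output.response.body.id}",
    "name": "${get_order_request.output.response.body.name}",
    "status": "complete",
    "isComplete": true,
    "WorkflowId": "${workflow.workflowId}",
    "TaskId": "${dispatch_approval.taskId}"
}
Also note that dispatch_approval is defined after the HTTP task, so its taskId may not exist yet when the HTTP request fires; if so, the HUMAN task would need to be scheduled before this one.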
I'm trying to use Klaviyo for the first time, and the natural starting point seems to be their new Node SDK. Unfortunately, their documentation seems to be an odd combination of extremely detailed and lacking any details.
I've set up my account and gotten my API key, but am failing to get a simple registration to succeed.
I thought I could do this:
import { ConfigWrapper, Profiles } from 'klaviyo-api';
import logger from '../utils/logger';
import requireEnvVariable from '../utils/requireEnvVariable';
export async function subscribe(profile) {
const apiKey = requireEnvVariable('KLAVIYO_PK');
ConfigWrapper(apiKey);
const user = Profiles.createProfile(profile)
.then(data => logger.debug({data}, 'Got profile data'))
.catch(error => {
logger.warn(error, 'Klaviyo error');
return error;
});
return true;
}
For profile, I'm passing in
{
"email": "me#example.com",
"phone_number": "",
"first_name": "Bob",
"last_name": "Smith"
}
but it responds with an error:
WARN: Klaviyo error
env: "development"
err: {
"type": "Error",
"message": "Bad Request",
"status": 400,
"response": {
"req": {
"method": "POST",
"url": "https://a.klaviyo.com/api/profiles/",
"data": {
"email": "me#example.com",
"phone_number": "",
"first_name": "Bob",
"last_name": "Smith"
},
"headers": {
"authorization": "Klaviyo-API-Key <my private key>",
"revision": "2022-10-17",
"user-agent": "klaviyo-api-node/1.0.2",
"content-type": "application/json",
"accept": "application/json"
}
},
"header": {
"date": "Wed, 28 Dec 2022 23:34:14 GMT",
"content-type": "application/vnd.api+json",
"content-length": "211",
"connection": "close",
"cf-ray": "780e1b056c3728f5-ORD",
"allow": "GET, POST, HEAD, OPTIONS",
"vary": "Cookie, Accept-Encoding",
"cf-cache-status": "DYNAMIC",
"cid": "UDiE82",
"ratelimit-limit": "700, 75;w=1, 700;w=60",
"ratelimit-remaining": "699",
"ratelimit-reset": "44",
"x-robots-tag": "noindex, nofollow",
"server": "cloudflare"
},
"status": 400,
"text": "{\"errors\":[{\"id\":\"5648779b-3e1a-4ccf-80c4-dc19b8b32a2c\",\"status\":400,\"code\":\"invalid\",\"title\":\"Invalid input.\",\"detail\":\"The payload provided in the request is invalid.\",\"source\":{\"pointer\":\"/data\"},\"meta\":{}}]}"
}
}
The various examples under Optional Parameters made me think that the above would Just Work; however, the docs also say, "For example values / data types, as well as whether parameters are required/optional, please reference the corresponding API Reference link," and "This SDK is a thin wrapper around our API. See our API Reference for full documentation on API behavior."
So looking at the Create Profile API Reference, I tried both
const user = Profiles.createProfile({attributes: profile})
and
const user = Profiles.createProfile({type: 'profile', attributes: profile})
but they both end up with the same error (though the reported data is updated appropriately), e.g.:
"status": 400,
"response": {
"req": {
"method": "POST",
"url": "https://a.klaviyo.com/api/profiles/",
"data": {
"type": "profile",
"attributes": {
"email": "me#example.com",
"phone_number": "",
"first_name": "Bob",
"last_name": "Smith"
}
},
What's the right way to use the new SDK? Or should I ignore this, and go back to creating manual fetch calls against their API?
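For what it's worth, the 400's source pointer of /data, together with the application/vnd.api+json content type, suggests the API wants Klaviyo's full JSON:API envelope. A minimal sketch under that assumption (not verified against the SDK docs), dropped into subscribe() above:
// Sketch, assuming createProfile expects the full JSON:API envelope;
// the "data" wrapper is my guess based on the error's "/data" pointer.
const user = await Profiles.createProfile({
  data: {
    type: 'profile',
    attributes: profile,
  },
});
If that works, the earlier attempts failed because the attributes were sent without the top-level data wrapper, so the API found nothing at /data.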
How can I send some of the properties from an AzureMonitorMetricAlert payload to Slack?
The full JSON looks like:
{
"schemaId":"AzureMonitorMetricAlert",
"data": {
"version":"2.0",
"properties":null,
"status":"Active",
"context": {
"timestamp":"2019-04-30T14:19:49.4987935Z",
"id":"/subscriptions/xxxxxxxx/resourceGroups/test/providers/microsoft.insights/metricAlerts/500%20response%20code",
"name":"500 response code",
"description":"",
"conditionType":"DynamicThresholdCriteria",
"severity":"3",
"condition": {
"windowSize":"PT5M",
"allOf": [
{
"alertSensitivity":null,
"failingPeriods":null,
"ignoreDataBefore":null,
"metricName":"requests/failed",
"metricNamespace":"microsoft.insights/components",
"operator":null,
"threshold":null,
"timeAggregation":"Count",
"dimensions": [
{
"name":"ResourceId",
"value":"xxxxxxxxx"
},
{
"name":"request/resultCode",
"value":"500"
}
],
"metricValue":null
}
]
},
"subscriptionId":"xxxxxxxxxxxxxxxx",
"resourceGroupName":"test",
"resourceName":"test",
"resourceType":"microsoft.insights/components",
"resourceId":"/subscriptions/xxxxxxxxxxx/resourceGroups/test/providers/microsoft.insights/components/tests",
"portalLink":"https://portal.azure.com/#resource/subscriptions/xxxxxxxx/resourceGroups/dsdsdsdsds"
}
}
}
How can I send a message to Slack that includes text such as "The alert for ${context.name} was sent."?
I was trying with:
"actions": {
"Post_message": {
"inputs": {
"host": {
"connection": {
"name": "#parameters('$connections')['slack']['connectionId']"
}
},
"method": "post",
"path": "/chat.postMessage",
"queries": {
"channel": "CHT0EMJ3H",
"parse": "full",
"text": "tests::::::=>>>>> #{triggerBody()?['context']['name']}"
}
},
"runAfter": {},
"type": "ApiConnection"
}
}
But it doesn't work. If I use just "text": "tests::::::=>>>>> @{triggerBody()}", the full JSON is sent, but it is hard to read since it arrives as a single string.
You can't read the properties because the output of triggerBody() is a string, so you need to parse the string into JSON first and then read the properties.
That means using the Parse JSON action: set its Content to @triggerBody(), and for the Schema, click Use sample payload to generate schema and paste in the JSON above. With this action in place you will be able to read the properties.
I tested with an HTTP Request trigger and sent a mail, reading the conditionType with @{body('Parse_JSON')?['data']?['context']?['conditionType']}.
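For the Slack text in the question, here is a minimal sketch of the two actions in code view (the schema below is trimmed to a placeholder; generate the full one from the sample payload):
"Parse_JSON": {
    "inputs": {
        "content": "@triggerBody()",
        "schema": {
            "type": "object",
            "properties": {
                "schemaId": { "type": "string" },
                "data": { "type": "object" }
            }
        }
    },
    "runAfter": {},
    "type": "ParseJson"
},
"Post_message": {
    "inputs": {
        "host": {
            "connection": {
                "name": "@parameters('$connections')['slack']['connectionId']"
            }
        },
        "method": "post",
        "path": "/chat.postMessage",
        "queries": {
            "channel": "CHT0EMJ3H",
            "parse": "full",
            "text": "The alert for @{body('Parse_JSON')?['data']?['context']?['name']} was sent."
        }
    },
    "runAfter": {
        "Parse_JSON": [
            "Succeeded"
        ]
    },
    "type": "ApiConnection"
}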
I'm using Node.js for the first time and working with the YouTube Data API. I can't quite get the data I want when I make a request to the API. This is what the data coming back from the API is supposed to look like:
/**
* API response
*/
{
"kind": "youtube#commentThreadListResponse",
"etag": "\"VPWTmrH7dFmi4s1RqrK4tLejnRI/yVL3QyyDwJFkFNOcCd4KZCcTFDw\"",
"nextPageToken": "QURTSl9pMlQySG1zcHRKb0dNZ3dWdlYtcUhyRDFDVlJXaHFmdVFiMUlaUFJfTTNjdTFpQzFNWUNuWjhBY0d2ZV8tTGR2aHFXRXRJVDZRQVpRM0YzNndWVXlQVFNwOU94UVFCWVd2empIVUlGdHlFR25keU8=",
"pageInfo": {
"totalResults": 20,
"resultsPerPage": 20
},
"items": [
{
"kind": "youtube#commentThread",
"etag": "\"VPWTmrH7dFmi4s1RqrK4tLejnRI/OqxtT8nFAjcFFrHa4DbZrY_NItM\"",
"id": "z13bwzmokuzcxtcqn04cclqbiozixldh21o"
},
{
"kind": "youtube#commentThread",
"etag": "\"VPWTmrH7dFmi4s1RqrK4tLejnRI/1B_usKd_ZpCLxG5l5nL7QfUtG3o\"",
"id": "z13puhijunbzytdcn22lstwptmybyzwdl"
},
{
"kind": "youtube#commentThread",
"etag": "\"VPWTmrH7dFmi4s1RqrK4tLejnRI/h8sS5KTOFa7CQWU5Je2Fp5UQ0bk\"",
"id": "z13dfbwzjyrpiznqc04cgjlpbyn0wtaiqpw0k"
},
{
"kind": "youtube#commentThread",
"etag": "\"VPWTmrH7dFmi4s1RqrK4tLejnRI/FQEl6XU95FHiM1ijRxC5fqngmqk\"",
"id": "z12atro51wfhzvmp104cihfytveyshbr4s40k"
},
{ ...........AND SO ON
I then use the following code in an attempt to console.log() this data from the YouTube API:
var DATABASE = youtube.commentThreads.list(
{ 'videoId': '7YcW25PHnAA', 'part': 'id, replies'}, function(err, data){
if(err){
console.error('Error: ' + err);
}
});
var resp = JSON.stringify(DATABASE);
console.log(resp);
But this is my output instead:
{
"uri": {
"protocol": "https:",
"slashes": true,
"auth": null,
"host": "www.googleapis.com",
"port": null,
"hostname": "www.googleapis.com",
"hash": null,
"search": "?videoId=7YcW25PHnAA&part=id%2C%20replies&key=AIzaSyDTTnj4HncXQCM3U-9XUvHyIf7kE9f2ZUk",
"query": "videoId=7YcW25PHnAA&part=id%2C%20replies&key=AIzaSyDTTnj4HncXQCM3U-9XUvHyIf7kE9f2ZUk",
"pathname": "/youtube/v3/commentThreads",
"path": "/youtube/v3/commentThreads?videoId=7YcW25PHnAA&part=id%2C%20replies&key=AIzaSyDTTnj4HncXQCM3U-9XUvHyIf7kE9f2ZUk",
"href": "https://www.googleapis.com/youtube/v3/commentThreads?videoId=7YcW25PHnAA&part=id%2C%20replies&key=AIzaSyDTTnj4HncXQCM3U-9XUvHyIf7kE9f2ZUk"
},
"method": "GET",
"headers": {
"User-Agent": "google-api-nodejs-client/0.10.0",
"host": "www.googleapis.com",
"accept": "application/json"
}
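A hedged explanation of what's happening: commentThreads.list is asynchronous, and its return value is the underlying request object, which is exactly the URI/method/headers structure being logged. The response body is only handed to the callback. A minimal sketch under that assumption, for the client version shown in the output (google-api-nodejs-client/0.10.0):
// The response is only available inside the callback, not in the return value.
youtube.commentThreads.list(
  { videoId: '7YcW25PHnAA', part: 'id,replies' },
  function (err, data) {
    if (err) {
      console.error('Error: ' + err);
      return;
    }
    // `data` is the commentThreadListResponse shown above.
    console.log(JSON.stringify(data, null, 2));
  }
);
(Newer googleapis releases return a promise instead and put the payload under res.data.)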
I have a task of taking data from SQL and uploading it as a CSV file to an FTP server.
I've done this for a single SQL row just fine. The problem I'm having is looping over all rows (a foreach loop) and inserting them as the content of the CSV file. I've tried an FTP Create File task inside a foreach loop, but I can only access a single row at a time to set as the file's content - I need all the rows!
Also keep in mind that these files will have 200k+ rows.
I could of course just write a C# console app for this, but the ease with which I got this far without writing any code makes it seem like a worthwhile endeavor.
We recently added a "Table" primitive for this scenario. Support in the designer is still a work in progress, but you can use it in code view.
In the scenario below, I'm getting rows from a table in SQL Azure, producing a CSV with two columns from the SQL query (First Name, Last Name), and then sending it via e-mail.
"Get_rows": {
"inputs": {
"host": {
"api": {
"runtimeUrl": "https://logic-apis-southcentralus.azure-apim.net/apim/sql"
},
"connection": {
"name": "#parameters('$connections')['sql']['connectionId']"
}
},
"method": "get",
"path": "/datasets/default/tables/#{encodeURIComponent(encodeURIComponent('[SalesLT].[Customer]'))}/items",
"queries": {
"$top": 10
}
},
"runAfter": {},
"type": "ApiConnection"
},
"tableCsv0": {
"inputs": {
"columns": [
{
"header": "First Name",
"value": "#item()?['FirstName']"
},
{
"header": "Last Name",
"value": "#item()?['LastName']"
}
],
"format": "csv",
"from": "#body('Get_rows')?['value']"
},
"runAfter": {
"Get_rows": [
"Succeeded"
]
},
"type": "Table"
},
"Send_an_email": {
"inputs": {
"body": {
"Body": "#body('tableCsv0')",
"Subject": "Subject",
"To": "deli#microsoft.com"
},
"host": {
"api": {
"runtimeUrl": "https://logic-apis-southcentralus.azure-apim.net/apim/office365"
},
"connection": {
"name": "#parameters('$connections')['office365']['connectionId']"
}
},
"method": "post",
"path": "/Mail"
},
"runAfter": {
"tableCsv0": [
"Succeeded"
]
},
"type": "ApiConnection"
}
Just following up to show how Derek's answer helped me with my problem of getting a large number of rows up to a file on an FTP server. I ended up using the output body of the Execute stored procedure action, as the Get rows action was limited to 512 rows.
NOTE: As the Table action is not yet available in the designer, do everything in code view; opening the designer caused issues and at one point deleted all my code.
"actions": {
"Create_file": {
"inputs": {
"body": "#body('tableCsv0')",
"host": {
"api": {
"runtimeUrl": "https://logic-apis-northeurope.azure-apim.net/apim/ftp"
},
"connection": {
"name": "#parameters('$connections')['ftp']['connectionId']"
}
},
"method": "post",
"path": "/datasets/default/files",
"queries": {
"folderPath": "transactions/ready/ecommerce/tickets_test/",
"name": "grma_tickets_#{formatDateTime(utcNow(),'yyyyMMdd_hhmmss')}.csv"
}
},
"runAfter": {
"tableCsv0": [
"Succeeded"
]
},
"type": "ApiConnection"
},
"Execute_stored_procedure": {
"inputs": {
"host": {
"api": {
"runtimeUrl": "https://logic-apis-northeurope.azure-apim.net/apim/sql"
},
"connection": {
"name": "#parameters('$connections')['sql']['connectionId']"
}
},
"method": "post",
"path": "/datasets/default/procedures/#{encodeURIComponent(encodeURIComponent('[Scheduledjob].[GetBArcodesForGRMA]'))}"
},
"runAfter": {},
"type": "ApiConnection"
},
"tableCsv0": {
"inputs": {
"columns": [
{
"header": "EventDateTime",
"value": "#item()?['EventDateTime']"
},
{
"header": "EventName",
"value": "#item()?['EventName']"
}
],
"format": "csv",
"from": "#body('Execute_stored_procedure')['ResultSets']['Table1']"
},
"runAfter": {
"Execute_stored_procedure": [
"Succeeded"
]
},
"type": "Table"
}