I want to create resources like CosmosDB, Azure Kubernetes service, etc
I went through the following document :
https://learn.microsoft.com/en-us/rest/api/resources/resources/create-or-update
I see that the request URL has parameters like :-
https://management.azure.com/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}?api-version=2021-04-01
Where can I find the values for the fields like resourceProviderNamespace, parentResourcePath, resourceType, etc for each of the resources like cosmosDB, AKS, etc?
Also the properties that each of the resources expect, like location, backup, etc ??
As suggested by Gaurav Mantri, you can refer to Azure FarmBeats control plane and data plane operations
Thank you, AnuragSharma-MSFT and MarcoPapst-5675. Posting your suggestion as an answer to help community members.
You can refer to the following script for CosmosDB role assignment:
{
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"roleDefinitionId": {
"type": "string",
"metadata": {
"description": "Name of the Role Definition"
}
},
"roleAssignmentName": {
"type": "string",
"metadata": {
"description": "Name of the Assignment"
}
},
"scope": {
"type": "string",
"metadata": {
"description": "Scope of the Role Assignment"
}
},
"principalId": {
"type": "string",
"metadata": {
"description": "Object ID of the AAD identity. Must be a GUID."
}
}
},
"variables": { },
"resources": [
{
"name": "[concat(parameters('roleAssignmentName'), '/', guid(parameters('scope')))]",
"type": "Microsoft.DocumentDB/databaseAccounts/sqlRoleAssignments",
"apiVersion": "2021-04-15",
"properties": {
"roleDefinitionId": "[parameters('roleDefinitionId')]",
"principalId": "[parameters('principalId')]",
"scope": "[parameters('scope')]"
}
}
]
}
You can refer to Create a CosmosDB Role Assignment using an ARM Template, Azure REST API get resource parentResourcePath parameter and Resources - Get By Id
First of all you need the clientID, clientSecret and tenantID of your Azure account. Make sure you have given the required permissions to access and modify Azure resources via the API. Hint: use the rbac command in Azure Cloud Shell.
// Get an access token for the Azure management API endpoints using the
// client-credentials (service principal) OAuth2 flow.
public static string GetAzureAccessToken()
{
    // Service principal credentials read from app configuration.
    var tenantId = System.Configuration.ConfigurationManager.AppSettings["AzureTenantID"];
    var clientId = System.Configuration.ConfigurationManager.AppSettings["AzureClientID"];
    var secret = System.Configuration.ConfigurationManager.AppSettings["AzureSecret"];
    var resourceUrl = "https://management.azure.com/";
    var requestUrl = $"https://login.microsoftonline.com/{tenantId}/oauth2/token";

    // NOTE(review): in a real-world application, inject a shared/typed HttpClient
    // (IHttpClientFactory) instead of creating one per call; kept local here but
    // now disposed so sockets are released.
    using (var httpClient = new System.Net.Http.HttpClient())
    {
        var dict = new Dictionary<string, string>
        {
            { "grant_type", "client_credentials" },
            { "client_id", clientId },
            { "client_secret", secret },
            { "resource", resourceUrl }
        };
        using (var requestBody = new System.Net.Http.FormUrlEncodedContent(dict))
        {
            // Blocking is kept to preserve the synchronous signature;
            // GetAwaiter().GetResult() surfaces the original exception instead
            // of wrapping it in an AggregateException as .Result does.
            // (PostAsync never returns null, so the old null check was dead code.)
            var response = httpClient.PostAsync(requestUrl, requestBody).GetAwaiter().GetResult();
            response.EnsureSuccessStatusCode();
            string responseContent = response.Content.ReadAsStringAsync().GetAwaiter().GetResult();
            if (!string.IsNullOrEmpty(responseContent))
            {
                var tokenResponse = JsonConvert.DeserializeObject<AzureTokenResponseModel>(responseContent);
                // Fall back to an empty string (original behavior) when the
                // response could not be deserialized.
                return tokenResponse?.access_token ?? "";
            }
        }
    }
    return "";
}
// Create an Azure resource group by calling the ARM REST API with RestSharp.
var clientResourceGroup = new RestClient($"https://management.azure.com/subscriptions/{SubscriptionID}/resourcegroups/{ResourceGroupName}?api-version=2022-05-01");
clientResourceGroup.Timeout = -1;
var requestResourceGroup = new RestRequest(Method.PUT);
requestResourceGroup.AddHeader("Authorization", "Bearer " + GetAzureAccessToken());
requestResourceGroup.AddHeader("Content-Type", "application/json");
// The PUT body only needs the location, and it must be valid JSON with
// double-quoted keys/values. The original used an invalid '#' string prefix
// (not a C# token), single-quoted pseudo-JSON, and an unbalanced closing brace.
var bodyResourceGroup = "{\"location\":\"" + AzLocation + "\"}";
requestResourceGroup.AddParameter("application/json", bodyResourceGroup, ParameterType.RequestBody);
IRestResponse responseResourceGroup = clientResourceGroup.Execute(requestResourceGroup);
Post down here if you are still facing difficulties. We can create other Azure resources like storage accounts, functions, app services, etc. the same way.
There is a NET5 console app that is logging from warning and above. This is the documentation followed but it is not working to log information types. It does log warnings. How to change the log level to information?
// Console entry point: builds the DI container via Startup and runs the executor.
class Program
{
    static async Task Main(string[] args)
    {
        var services = new ServiceCollection();
        var startup = new Startup();
        startup.ConfigureServices(services);
        // Dispose the provider on exit so singletons (e.g. the Application
        // Insights telemetry channel) are flushed and cleaned up.
        using (var serviceProvider = services.BuildServiceProvider())
        {
            // GetRequiredService throws a descriptive exception when the
            // registration is missing, instead of returning null and failing
            // later with a NullReferenceException.
            var executor = serviceProvider.GetRequiredService<IExecutor>();
            await executor.ExecuteTestsAsync();
        }
    }
}
public class Startup
{
    // Application configuration loaded once from appsettings.json.
    public IConfiguration Configuration { get; }

    public Startup() =>
        Configuration = new ConfigurationBuilder()
            .AddJsonFile("appsettings.json")
            .Build();

    // Registers the configuration, logging, the Application Insights worker
    // service telemetry, and the test executor with the DI container.
    public void ConfigureServices(IServiceCollection services)
    {
        services.AddSingleton(Configuration);
        services.AddLogging();
        services.AddApplicationInsightsTelemetryWorkerService();
        services.AddSingleton<IExecutor, Executor>();
    }
}
public Executor(ILogger<Executor> logger)
{
logger.LogInformation("Hello");//Not logged in AppInsights
...
appsettings.json
{
"ApplicationInsights":
{
"InstrumentationKey": "putinstrumentationkeyhere"
},
"Logging":
{
"LogLevel":
{
"Default": "Information"
}
}
}
Also tried this:
services.AddLogging(loggingBuilder => loggingBuilder.AddFilter<Microsoft.Extensions.Logging.ApplicationInsights.ApplicationInsightsLoggerProvider>("Category", LogLevel.Information));
You need to specify the log level for Application Insights separately:
{
"ApplicationInsights":
{
"InstrumentationKey": "putinstrumentationkeyhere"
},
"Logging":
{
"LogLevel":
{
"Default": "Information"
},
"ApplicationInsights": {
"LogLevel": {
"Default": "Information"
}
}
}
}
(source)
I have an Azure Function with AzureFunctionVersion v3 and for some time now I don't see any logs in Application Insights or in the Rider Console that are not logged in the FunctionTrigger itself.
// HTTP-triggered function (DELETE .../{roomId}); logs and delegates to myservice.
[FunctionName("test")]
public IActionResult Test([HttpTrigger(AuthorizationLevel.Function, "delete", Route = BasePath + "/{roomId}")] HttpRequest req,
    ILogger log)
{
    log.LogInformation("Log is shown");
    myservice.DoIt();
    // The method is declared as returning IActionResult, so a value must be
    // returned; the original snippet had no return statement and would not compile.
    return new OkResult();
}
namespace MyApp.Test.Service {
    // NOTE(review): the original snippet was missing the class declaration and
    // the _log field declaration; both are required for this to compile.
    public class MyService {
        private readonly ILogger _log;

        public MyService(ILoggerFactory loggerFactory)
        {
            // The logger category is the namespace ("MyApp.Test.Service"), so
            // host.json logLevel entries such as "MyApp" apply to this logger.
            _log = loggerFactory.CreateLogger(GetType().Namespace);
        }

        public void DoIt() {
            _log.LogInformation("Log is not shown");
        }
    }
}
My host.json looks like:
"logging": {
"applicationInsights": {
"samplingExcludedTypes": "Request",
"samplingSettings": {
"isEnabled": false
}
},
"logLevel": {
"MyApp.*": "Information"
}
}
Please try to change the logLevel in one of the following ways:
"logLevel": {"MyApp": "Information"}
"logLevel": { "Default": "Information" }
I'm trying to specify the task hub name for my durable function following the documentation.
Here are steps I've done:
host.json
{
"version": "2.0",
"extensions": {
"durableTask": {
"hubName": "%TaskHubName%"
},
// ...
}
}
settings.json
{
"Values": {
"AzureWebJobsStorage": "connection string",
"FUNCTIONS_WORKER_RUNTIME": "dotnet",
"TaskHubName": "mytaskhub"
}
// ...
}
MyFunction.cs
public async Task<IActionResult> Run(
[HttpTrigger(AuthorizationLevel.Function, "post", Route = "foo/bar")]
[RequestBodyType(typeof(List<QueryParams>), "Query Parameters")] HttpRequest req,
[OrchestrationClient(TaskHub = "%TaskHubName%")] DurableOrchestrationClient starter)
However, I don't see any changes in my Azure Storage account after running this function. What am I doing wrong?
I realized that I simply didn't update the tables list...everything is working, sorry for disruption.
We have an API that will be used to provision certain resources in AWS using Cloud Formation. This includes a Lambda function that will send events to S3, with the bucket being configurable. The thing is, we will know the bucket name when we provision the lambda, not within the lambda code itself.
As far as I can tell, there is no way to inject the S3 bucket name at the time of provisioning, in the Cloud Formation Template itself. Is that true?
The only solution I can see is to generate the function code on the fly, and embed that into the Cloud Formation template. This would make us unable to use any NPM dependencies along with the function code. Is there a better option?
So, I realized I had never updated this question with my eventual solution. I ended up embedding a proxy lambda function into the cloudformation template, which enabled me to inject template parameters.
Example:
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Creates a function to relay messages from a Kinesis instance to S3",
"Parameters": {
"S3Bucket" : {
"Type": "String",
"Description": "The name of the S3 bucket where the data will be stored"
},
"S3Key": {
"Type": "String",
"Description": "The key of the directory where the data will be stored"
}
},
"Resources": {
"mainLambda": {
"Type" : "AWS::Lambda::Function",
"Properties" : {
"Handler" : "index.handler",
"Description" : "Writes events to S3",
"Role" : { "Ref": "LambdaRoleARN" },
"Runtime" : "nodejs4.3",
"Code" : {
"S3Bucket": "streams-resources",
"S3Key": "astro-bass/${GIT_COMMIT}/lambda/astro-bass.zip"
}
}
},
"lambdaProxy": {
"Type" : "AWS::Lambda::Function",
"Properties" : {
"Handler" : "index.handler",
"Runtime" : "nodejs",
"Code" : {
"ZipFile": { "Fn::Join": ["", [
"var AWS = require('aws-sdk');",
"var lambda = new AWS.Lambda();",
"exports.handler = function(event, context) {",
"event.bundledParams = ['",
{ "Ref": "S3Bucket" },
"','",
{ "Ref": "S3Key" },
"'];",
"lambda.invoke({",
"FunctionName: '",
{ "Ref": "mainLambda" },
"',",
"Payload: JSON.stringify(event, null, 2),",
"InvocationType: 'Event'",
"}, function(err, data) {",
"if(err) {",
"context.fail(err);",
"}",
"context.done();",
"});",
"};"
]]}
}
}
},
},
...
}
The proxy function had the parameters injected into its code (s3bucket/key), and then it invokes the main lambda with a modified event object. It's a little unorthodox but struck me as much cleaner than the other available solutions, such as parse stacknames/etc. Worked well thus far.
Note that this solution only works currently with the legacy node environment. Not an issue, but worrisome in terms of the longevity of this solution.
UPDATE:
We ran into limitations with the previous solution and had to devise yet another one. We ended up with an off-label usage of the description field to embed configuration values. Here is our Lambda
'use strict';
var aws = require('aws-sdk');
var lambda = new aws.Lambda({apiVersion: '2014-11-11'});
let promise = lambda.getFunctionConfiguration({ FunctionName: process.env['AWS_LAMBDA_FUNCTION_NAME'] }).promise();
exports.handler = async function getTheConfig(event, context, cb) {
try {
let data = await promise;
cb(null, JSON.parse(data.Description).bucket);
} catch(e) {
cb(e);
}
};
Then, in the description field, you can embed a simple JSON snippet like so:
{
"bucket": "bucket-name"
}
Moreover, this structure, using the promise outside of the handler, limits the request to only occurring when the container is spawned - not for each individual lambda execution.
Not quite the cleanest solution, but the most functional one we've found.
There is no way of passing parameters to a Lambda function beside the event itself at the moment.
If you are creating a Lambda function with CloudFormation you could use the following workaround:
Use the Lambda function name to derive the CloudFormation stack name.
Use the CloudFormation stack name to access resources, or parameters of the stack when executing the Lambda function.
I would suggest doing it like this.
First create an index.js file and add this code.
var AWS = require('aws-sdk');
const s3 = new AWS.S3();
const https = require('https');
exports.handler = (event, context, callback) => {
const options = {
hostname: process.env.ApiUrl,
port: 443,
path: '/todos',
method: 'GET'
};
const req = https.request(options, (res) => {
console.log('statusCode:', res.statusCode);
console.log('headers:', res.headers);
res.on('data', (d) => {
process.stdout.write(d);
});
});
req.on('error', (e) => {
console.error(e);
});
req.end();
};
Zip the index.js file and upload it to an S3 bucket in the same region as your lambda function.
Then use this CloudFormation template; make sure you specify the correct bucket name.
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "ApiWorkflow",
"Metadata": {
},
"Parameters": {
"ApiUrl": {
"Description": "Specify the api url",
"Type": "String",
"Default": "jsonplaceholder.typicode.com"
}
},
"Mappings": {
},
"Conditions": {
},
"Resources": {
"lambdaVodFunction": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Code": {
"S3Bucket": "lamdba-exec-tests",
"S3Key": "index.js.zip"
},
"Handler": "index.handler",
"Role": "arn:aws:iam::000000000:role/BasicLambdaExecRole",
"Runtime": "nodejs10.x",
"FunctionName": "ApiWorkflow",
"MemorySize": 128,
"Timeout": 5,
"Description": "Texting Lambda",
"Environment": {
"Variables": {
"ApiUrl": {
"Ref": "ApiUrl"
},
"Test2": "Hello World"
}
}
}
}
},
"Outputs": {
"ApiUrl": {
"Description": "Set api url",
"Value": {
"Ref": "ApiUrl"
}
}
}
}
You should see the environment variables in the template; you can access these in your NodeJS Lambda function like this.
process.env.ApiUrl