Azure HyperVGeneration V2 VM creation - azure

I'm looking to spin up VMs using the Node.js/JavaScript SDK for Azure. So far I've had luck with my code and it pretty much works as expected. However, I would like to spin up generation 2 / Hyper-V v2 VMs instead of the default v1. Here's a snippet of my code:
return new Promise((resolve, reject) => {
let resourceClient = new ComputeManagementClient(credentials, azureSubscriptionID);
resourceClient.virtualMachines.createOrUpdate(reourceGroup, name, {
location: location,
osProfile: { computerName: name, adminUsername: 'admin', adminPassword: adminPassword, customData: Buffer.from(prepScript).toString('base64') },
hardwareProfile: { vmSize: 'Standard_B2s' },
HyperVGeneration: 'V2',
storageProfile: {
imageReference: { publisher: 'Canonical', offer: 'UbuntuServer', sku: '18.04-LTS', version: 'latest' },
osDisk: { name: name + '-disk', createOption: 'FromImage' }
},
networkProfile: {
networkInterfaces: [{ id: nic.id, primary: true }]
}
}, function (err, result) {
if (err) {
reject(err);
} else {
resolve(result);
}
});
}
The problem I'm having is with:
HyperVGeneration: 'V2'
as it seems to not even apply. Nor do I get any errors — just a V1 VM is created. The Azure docs are kind of lacking here: https://learn.microsoft.com/en-us/javascript/api/@azure/arm-compute/hypervgeneration?view=azure-node-latest
I've also tried the other parameters such as HyperVGenerationType / Types with the same result.

I figured it out!
I was working off the wrong assumption here. Instead of configuring the VM generation/HyperVGeneration to V2, somehow using a gen2 image sku to base the OS disk off of, automatically configures the VM generation to V2 as well. It also doesn't help the fact that MS calls the same thing by multiple names: VM generation, HyperV generation, image generation.
Anyway the solution is to actually look up all available images which can be installed and pick the correct one from there.
To get available images for your region:
az vm image list --all --publisher 'Canonical' --sku '18_04' --output table
Which returns something like:
Offer Publisher Sku Urn Version
-------------------------------------------- ----------- ---------------------------- ------------------------------------------------------------------------------------------- ---------------
0001-com-ubuntu-confidential-vm-experimental Canonical 18_04 Canonical:0001-com-ubuntu-confidential-vm-experimental:18_04:18.04.20210309 18.04.20210309
0001-com-ubuntu-confidential-vm-experimental Canonical 18_04-gen2 Canonical:0001-com-ubuntu-confidential-vm-experimental:18_04-gen2:18.04.20210309 18.04.20210309
0001-com-ubuntu-pro-advanced-sla Canonical 18_04 Canonical:0001-com-ubuntu-pro-advanced-sla:18_04:18.04.20200318 18.04.20200318
0001-com-ubuntu-pro-advanced-sla Canonical 18_04 Canonical:0001-com-ubuntu-pro-advanced-sla:18_04:18.04.20200605 18.04.20200605
...
Another thing of note is that some Ubuntu images have the sku as "18.04" and some "18_04" which further confuses things.
The final solution was to use this snippet to put it all together:
return new Promise((resolve, reject) => {
let resourceClient = new ComputeManagementClient(credentials, azureSubscriptionID);
resourceClient.virtualMachines.createOrUpdate(reourceGroup, name, {
location: location,
osProfile: { computerName: name, adminUsername: 'admin', adminPassword: adminPassword, customData: Buffer.from(prepScript).toString('base64') },
hardwareProfile: { vmSize: 'Standard_B2s' },
storageProfile: {
imageReference: { publisher: 'Canonical', offer: 'UbuntuServer', sku: '18_04-lts-gen2', version: 'latest' },
osDisk: { name: name + '-disk', createOption: 'FromImage' }
},
networkProfile: {
networkInterfaces: [{ id: nic.id, primary: true }]
}
}, function (err, result) {
if (err) {
reject(err);
} else {
resolve(result);
}
});
}
Notice how the image sku is 18_04-lts-gen2 instead of 18.04-LTS.

Related

Dependency issue with Azure Bicep resource modules

I am creating three resources: cosmos account, cosmos database, and cosmos containers. I have all of them within one folder except for the containers where I modularized it.
main.bicep
// Cosmos DB Account.
// Fixed: Bicep separates resource type and API version with '@', not '#'
// (the '#' was a markdown-escaping artifact).
resource cosmos 'Microsoft.DocumentDB/databaseAccounts@2020-06-01-preview' = if (deployDB) {
  name: cosmosAccountName
  location: location
  tags: appTags
  kind: 'GlobalDocumentDB'
  identity: {
    type: 'None'
  }
  properties: {
    ...
  }
  // Explicit dependency on the VNet resource declared elsewhere in this file.
  dependsOn: [
    virtualNetwork
  ]
}
// Cosmos SQL Database — child of the account; the '${cosmos.name}/...' name
// encodes the parent, which also creates the implicit dependency.
resource cosmosdb 'Microsoft.DocumentDB/databaseAccounts/sqlDatabases@2020-04-01' = if (deployDB) {
  name: '${cosmos.name}/${databaseName}'
  properties: {
    resource: {
      id: databaseName
    }
    options: {
      throughput: 400
    }
  }
}
// Cosmos DB Containers — one module instance per entry in cosmosContainers.
module cosmosContainersUser '../../../cicd/bicep/modules/datastore/cosmos-db.bicep' = [for container in cosmosContainers: if (deployDB) {
  name: container.containerName
  params: {
    cosmosContainerName: container.name
    id: container.id
    partitionKey: container.partitionKey
  }
}]
cosmos-db.bicep
param cosmosContainerName string
param id string
param partitionKey string
// Cosmos Container.
// Fixed: Bicep separates resource type and API version with '@', not '#'.
// NOTE(review): 'name' must resolve to the fully qualified
// '<account>/<database>/<container>' path (or the resource needs a 'parent:'
// reference) — confirm the caller passes a qualified name.
resource cosmosContainerUser 'Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers@2021-06-15' = {
  name: cosmosContainerName
  properties: {
    resource: {
      id: id
      // Consistent indexing over all paths except the system _etag.
      indexingPolicy: {
        indexingMode: 'consistent'
        automatic: true
        includedPaths: [
          {
            path: '/*'
          }
        ]
        excludedPaths: [
          {
            path: '/"_etag"/?'
          }
        ]
      }
      partitionKey: {
        kind: 'Hash'
        paths: [
          partitionKey
        ]
      }
      // Last-writer-wins on the server timestamp.
      conflictResolutionPolicy: {
        mode: 'LastWriterWins'
        conflictResolutionPath: '/_ts'
      }
    }
  }
}
This works no problem however main.bicep is still massive and I want to keep modularizing it and am having trouble moving the other resources into cosmos-db.bicep. If I was to add the cosmos database resource into the file:
cosmos-db.bicep
param databaseName string
param cosmosDbName string
param cosmosContainerName string
param id string
param partitionKey string
// Cosmos SQL Database.
// Fixed: Bicep separates resource type and API version with '@', not '#'.
// NOTE(review): as a child resource type, 'name' must be the qualified
// '<account>/<database>' path (or use 'parent:') — confirm against callers.
resource cosmosdb 'Microsoft.DocumentDB/databaseAccounts/sqlDatabases@2020-04-01' = {
  name: cosmosDbName
  properties: {
    resource: {
      id: databaseName
    }
    options: {
      throughput: 400
    }
  }
}
// Cosmos Container inside the database declared above.
resource cosmosContainerUser 'Microsoft.DocumentDB/databaseAccounts/sqlDatabases/containers@2021-06-15' = {
  name: cosmosContainerName
  properties: {
    resource: {
      id: id
      // Consistent indexing over all paths except the system _etag.
      indexingPolicy: {
        indexingMode: 'consistent'
        automatic: true
        includedPaths: [
          {
            path: '/*'
          }
        ]
        excludedPaths: [
          {
            path: '/"_etag"/?'
          }
        ]
      }
      partitionKey: {
        kind: 'Hash'
        paths: [
          partitionKey
        ]
      }
      conflictResolutionPolicy: {
        mode: 'LastWriterWins'
        conflictResolutionPath: '/_ts'
      }
    }
  }
}
main.bicep
// NOTE(review): this invocation produces BCP035 because the module also
// declares cosmosContainerName, id and partitionKey as parameters without
// defaults — every such parameter must be supplied in 'params' (or be given
// a default value inside the module).
module cosmosdb '../../../cicd/bicep/modules/datastore/cosmos-db.bicep' = {
name: 'cosmosDbName'
params: {
cosmosDbName: 'cosmosDbName'
databaseName: databaseName
}
}
I get a red line under the params keyword in both cosmosdb and cosmosContainersUser modules in main.bicep. For cosmosdb module it says: The specified "object" declaration is missing the following required properties: "cosmosContainerName", "id", "partitionKey".bicep(BCP035) and the params keyword in cosmosContainersUser module says The specified "object" declaration is missing the following required properties: "cosmosDbName", "databaseName".bicep(BCP035).
I'm guessing this is a dependency issue since these resources rely on one another. I can't use the `parent` keyword in main.bicep because modules don't support it. I tried using `dependsOn`, and I also tried adding `parent` to the cosmosContainerUser resource in cosmos-db.bicep, but I still get the same error messages.

Creating AWS S3 object life cycle using NodeJS

Creating AWS S3 object life cycle using NodeJS.
I want to create an S3 object lifecycle via the API using NodeJS. Looking at the documentation, AWS only provides an example of setting multiple lifecycle rules, and only in Java.
https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-set-lifecycle-configuration-intro.html
I also checked this url -
https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#getBucketLifecycle-property
General Concern
How do I set multiple Transitions with NodeJS, the way Java does?
// Lifecycle rule: for objects tagged archive=true, transition to
// Standard-IA after 30 days, to Glacier after 365 days, and expire
// them after 3650 days.
BucketLifecycleConfiguration.Rule rule2 = new BucketLifecycleConfiguration.Rule()
.withId("Archive and then delete rule")
.withFilter(new LifecycleFilter(new LifecycleTagPredicate(new Tag("archive", "true"))))
.addTransition(new Transition().withDays(30).withStorageClass(StorageClass.StandardInfrequentAccess))
.addTransition(new Transition().withDays(365).withStorageClass(StorageClass.Glacier))
.withExpirationInDays(3650)
.withStatus(BucketLifecycleConfiguration.ENABLED);
Followed by -
https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-set-lifecycle-configuration-intro.html
Any help would be great.
We need to pass a Rules array in the LifecycleConfiguration, similar to the CLI example:
// Fixed: use putBucketLifecycleConfiguration — the legacy putBucketLifecycle
// API is deprecated and its rules take a top-level Prefix; only the newer
// call accepts the Filter-based rules used below.
s3.putBucketLifecycleConfiguration(
  {
    Bucket: "sample-temp-bucket",
    LifecycleConfiguration: {
      Rules: [
        {
          // Rule 1: expire after 1 day objects matching the prefix AND both tags.
          Filter: {
            And: {
              Prefix: "myprefix",
              Tags: [
                {
                  Value: "mytagvalue1",
                  Key: "mytagkey1",
                },
                {
                  Value: "mytagvalue2",
                  Key: "mytagkey2",
                },
              ],
            },
          },
          Status: "Enabled",
          Expiration: {
            Days: 1,
          },
        },
        {
          // Rule 2: under documents/, move to Glacier at 365 days, expire at 3650.
          Filter: {
            Prefix: "documents/",
          },
          Status: "Enabled",
          Transitions: [
            {
              Days: 365,
              StorageClass: "GLACIER",
            },
          ],
          Expiration: {
            Days: 3650,
          },
          ID: "ExampleRule",
        },
      ],
    },
  },
  (error, result) => {
    if (error) console.log("error", error);
    if (result) console.log("result", result);
  }
);

Gcloud compute api createVM not creating public ip

I use "@google-cloud/compute": "^2.1.0" to create a VM, and I set 'ONE_TO_ONE_NAT' as shown below.
The issue is that the VM is not created with a public IP.
If I add http:true in the config, then the public IP is created but I would like to avoid the http tag in the config.
For me it looks like a bug as it should work according to the documentation.
Any idea why it is not working? (apart from a bug)
const config = {
  //http: true,
  machineType: machineType,
  disks: [
    {
      boot: true,
      initializeParams: {
        sourceImage: SOURCE_IMAGE_PREFIX + projectId + SOURCE_IMAGE_PATH,
      },
    },
  ],
  networkInterfaces: [
    {
      network: 'global/networks/default',
      // NOTE(review): this is the bug being asked about — accessConfigs
      // must be an ARRAY of access-config objects (see answer below);
      // as a plain object it is ignored and no external IP is assigned.
      accessConfigs: {
        type: 'ONE_TO_ONE_NAT',
        name: 'External NAT',
      },
    },
  ],
  metadata: {
    items: [
      {
        key: 'startup-script',
        // Fixed: the template literal was missing its closing backtick,
        // which made the pasted snippet a syntax error.
        value: `#! /bin/bash
sudo docker run ...`,
      },
    ],
  },
};
It was just a mistake that took a lot of time to find! accessConfigs is an array — add [], like this:
// accessConfigs as an array of access-config objects:
accessConfigs: [{
type: 'ONE_TO_ONE_NAT',
name: 'External NAT',
}],
An empty access_config block would assign an external ephemeral IP to your instance.
network_interface {
network = "default"
# Empty access_config block: requests an ephemeral external IP.
access_config {}
}

TagSpecifications with requestSpotInstances UnexpectedParameter with aws-sdk

I'm trying to add a tag to my AWS Spot Request. But it has returned me { UnexpectedParameter: Unexpected key 'TagSpecifications' found in params.LaunchSpecification.
I have followed this documentation, and I have already tried to move this code out of LaunchSpecification, but the error persists.
// Spot request parameters.
// NOTE(review): per the EC2 API reference, LaunchSpecification
// (RequestSpotLaunchSpecification) has no TagSpecifications member — the
// SDK rejects it with UnexpectedParameter; tag the spot request after
// creation instead (see answer below).
const params = {
InstanceCount: 1,
LaunchSpecification: {
ImageId: config.aws.instanceAMI,
KeyName: 'backoffice',
InstanceType: config.aws.instanceType,
SecurityGroupIds: [config.aws.instanceSecurityGroupId],
TagSpecifications: [{
ResourceType: 'instance',
Tags: [{
Key: 'Type',
Value: 'Mongo-Dump',
}],
}],
BlockDeviceMappings: [{
DeviceName: '/dev/xvda',
Ebs: {
DeleteOnTermination: true,
SnapshotId: 'snap-06e838ce2a80337a4',
VolumeSize: 50,
VolumeType: 'gp2',
Encrypted: false,
},
}],
IamInstanceProfile: {
Name: config.aws.instanceProfileIAMName,
},
Placement: {
AvailabilityZone: `${config.aws.region}a`,
},
},
SpotPrice: config.aws.instancePrice,
Type: 'one-time',
};
// Submit the spot request and hand the promise back to the caller.
return ec2.requestSpotInstances(params).promise();
Something makes me think that the problem is in the documentation or in the aws-sdk for Javascript itself. My options are exhausted.
The error message is correct. According to the documentation, the RequestSpotLaunchSpecification object doesn't have an attribute called TagSpecifications.
However, you can tag your Spot Instance request after you create it.
The response of ec2.requestSpotInstances(params) contains an array of SpotInstanceRequest objects, each with a SpotInstanceRequestId (e.g. sir-012345678). Use the CreateTags API with these Spot Instance request ids to add the tags.
// Tag the Spot Instance request after it has been created — tags cannot
// be attached through the launch specification itself.
const createTagParams = {
  Resources: ['sir-12345678'],
  Tags: [{ Key: 'Type', Value: 'Mongo-Dump' }],
};
ec2.createTags(createTagParams, function (err, data) {
  // ...
});

Creating azure VM from image with Node SDK

I'm trying to use the azure sdk (azure-sdk-for-node) to create a virtual machine based on an image i've already saved. I've also already created the service.
Here is what I've got:
// Create a virtual machine in the cloud service.
// NOTE(review): this call fails with BadRequest "A computer name must be
// specified" — for LinuxProvisioningConfiguration the service expects
// 'hostName' rather than 'computerName' (computerName is Windows-only);
// see the answer below.
computeManagementClient.virtualMachines.createDeployment('prerender-pro', {
name: "prerender-pro",
deploymentSlot: "Production",
label: "for heavy duty caching",
roles: [{
roleName: "prerender-pro",
roleType: "PersistentVMRole",
label: "for heavy duty caching",
// OS disk created from a previously captured image in blob storage.
oSVirtualHardDisk: {
sourceImageName: "prerender-os-2014-07-16",
mediaLink: "https://XXXXXXX.blob.core.windows.net/vhds/prerender-os-2014-07-16.vhd"
},
dataVirtualHardDisks: [],
configurationSets: [{
configurationSetType: "LinuxProvisioningConfiguration",
adminUserName: "Blah",
adminPassword: "Blahblah2014!",
computerName: 'prerender-pro',
enableAutomaticUpdates: true,
resetPasswordOnFirstLogon: false,
storedCertificateSettings: [],
inputEndpoints: []
}, {
// Network config: expose only the remote-desktop endpoint on 3389.
configurationSetType: "NetworkConfiguration",
subnetNames: [],
storedCertificateSettings: [],
inputEndpoints: [{
localPort: 3389,
protocol: "tcp",
name: "RemoteDesktop"
}]
}]
}]
}, function (err, result) {
if (err) {
console.error(err);
} else {
console.info(result);
}
});
And the error I'm getting is this. I follow the example in the github readme almost exactly. Not sure why this is an issue.
{ [Error: A computer name must be specified.]
code: 'BadRequest',
statusCode: 400,
requestId: '9206ea1e591eb4dd8ea21a9196da5d74' }
Thanks!
It turns out that the error message is inaccurate. When deploying a Linux instance, only the "HostName" is required when defining the Configuration Set. "ComputerName" applies only to Windows instances. Here's an example of C# code:
// Linux provisioning: only HostName is required here; ComputerName
// applies to Windows instances only.
ConfigurationSet configSet = new ConfigurationSet
{
HostName = "VMTest",
UserName="xxxxx",
UserPassword="xxxx",
ConfigurationSetType = ConfigurationSetTypes.LinuxProvisioningConfiguration
}

Resources