I am using the following gcloud command to stop my Cloud SQL instance, as described in the documentation:
gcloud sql instances patch my-sql-instance --activation-policy NEVER
how can I achieve the same in nodejs code?
is there a google library for the sql api in npm?
You can do this using the googleapis package; see also the SQL Admin API reference.
// Stop a Cloud SQL instance by setting its activation policy to NEVER —
// the programmatic equivalent of:
//   gcloud sql instances patch my-sql-instance --activation-policy NEVER
const {google} = require('googleapis');
const {auth} = require('google-auth-library');

const sqladmin = google.sqladmin('v1beta4');

auth.getApplicationDefault((err, authRes) => {
  // Fail fast: the original ignored this error (`_err`) and then
  // dereferenced `authRes.credential`, crashing when ADC is unavailable.
  if (err) {
    console.error('Failed to acquire application default credentials:', err);
    return;
  }
  const req = {
    auth: authRes.credential,
    project: "my-project-id",
    instance: "my-instance",
    requestBody: {
      "settings": {
        "activationPolicy": "NEVER"
      }
    }
  };
  sqladmin.instances.patch(req, (patchErr, res) => {
    if (patchErr) console.error(patchErr);
    if (res) console.info(res);
  });
});
Related
I have uploaded my API project (a Node.js project) to an AWS ECS container, and my project contains Swagger documentation. In Swagger I want to show the IP address of the host that the API is currently running on, but I cannot find the right code to fetch it. Is there a solution for that? I have managed to implement it in a .NET Core API.
Here is how it looks right now:
Thx in advance.
You can make use of AWS ECS metadata endpoint http://172.17.0.1:51678/v1/metadata from an ECS task to fetch details about the container instance. The details fetched can then be used to get the private/public ip address of the instance. Example:
import http from 'http';
import util from 'util';
import AWS from 'aws-sdk';
// Resolve the private and public IP addresses of the EC2 instance backing the
// current ECS container by chaining three lookups: the ECS agent introspection
// endpoint -> ecs.describeContainerInstances -> ec2.describeInstances.
// Returns [privateIp, publicIp], or undefined if any step fails (errors are
// only logged, not rethrown).
// NOTE(review): <your_aws_region> is a placeholder the reader must replace;
// this snippet does not compile as-is.
export const getIPAddresses = async () => {
try {
// ECS agent introspection endpoint; 172.17.0.1 is the docker0 bridge address
// as seen from inside the task container — TODO confirm for awsvpc/Fargate.
let options: any = {
hostname: '172.17.0.1',
port: 51678,
path: '/v1/metadata',
method: 'GET'
}
let containerInstanceDetails: any = await httpGet(options);
containerInstanceDetails = JSON.parse(containerInstanceDetails);
const cluster = containerInstanceDetails["Cluster"];
const containerInstanceArn = containerInstanceDetails["ContainerInstanceArn"];
// Assumes the new ARN format arn:...:container-instance/<cluster>/<uuid>
// (index 2 after splitting on '/') — TODO confirm; legacy ARNs without the
// cluster segment would need index 1.
const containerInstanceUUID = containerInstanceArn.split('/')[2];
let params: any = {
cluster: cluster,
containerInstances: [containerInstanceUUID]
}
// Ensure a region is configured before constructing service clients.
if (!AWS.config.region) {
AWS.config.update({
region: <your_aws_region>
});
}
const ecs = new AWS.ECS({ 'region': <your_aws_region> });
const ec2 = new AWS.EC2({ 'region': <your_aws_region> });
// aws-sdk v2 service methods are callback-based; promisify (bound to the
// client so `this` is preserved) to allow await below.
const describeContainerInstancesAsync = util.promisify(ecs.describeContainerInstances).bind(ecs);
const describeInstancesAsync = util.promisify(ec2.describeInstances).bind(ec2);
// Map the container instance to its underlying EC2 instance id...
let data = await describeContainerInstancesAsync(params);
const ec2InstanceId = data.containerInstances[0].ec2InstanceId;
params = {
InstanceIds: [
ec2InstanceId
]
}
// ...then look up that instance's addresses.
data = await describeInstancesAsync(params);
return [data.Reservations[0].Instances[0].PrivateIpAddress, data.Reservations[0].Instances[0].PublicIpAddress];
}
catch(err) {
// Errors are swallowed after logging; callers receive undefined.
console.log(err);
}
}
// Perform an HTTP GET and resolve with the complete response body as a UTF-8
// string. Rejects with the error message on a network failure.
async function httpGet(options) {
  return new Promise((resolve, reject) => {
    http.get(options, response => {
      response.setEncoding('utf8');
      // Accumulate every chunk and resolve on 'end'. The original resolved on
      // the FIRST 'data' event, silently truncating multi-chunk responses.
      let body = '';
      response.on('data', chunk => {
        body += chunk;
      });
      response.on('end', () => {
        resolve(body);
      });
    }).on('error', error => {
      reject(error.message);
    });
  });
}
I am running the following code:
// Attach a Firestore snapshot listener that logs newly added documents in a
// collection (Firebase Admin SDK).
const { initializeApp } = require('firebase-admin/app');
const { getFirestore } = require('firebase-admin/firestore');
const {firestore} = require("firebase-admin");
const QuerySnapshot = firestore.QuerySnapshot;

initializeApp();
const db = getFirestore();

const initializeListener = (collectionName) => {
  console.log('called function');
  const query = db.collection(collectionName);
  query.onSnapshot((querySnapshot) => {
    // The original had a stray `querySnapshot.docs().` here that chained into
    // the console.log below and would throw at runtime; removed.
    console.log('snapshot received');
    querySnapshot.docChanges().forEach((change) => {
      console.log('doc change found');
      if (change.type === "added") {
        console.log("New " + collectionName, change.doc.data());
      }
    });
  }, (err) => {
    // Parameter was named `erry` while the template interpolated `err`,
    // which raised a ReferenceError inside the error handler; fixed.
    console.log(`Encountered error: ${err}`);
  });
};

initializeListener('my_collection');
If running whilst offline I don't see the 'snapshot received' message until I go online. If offline persistence should be available here, how do I access it?
You are using the Firebase Admin SDK (a wrapper around the Google Cloud backend SDK), which does not have any sort of persistence on any platform. Offline persistence is only available for the web and client SDKs provided by Firebase. As you can see from the linked documentation:
Note: Offline persistence is supported only in Android, Apple, and web apps.
Given the protocol buffers definition available at: https://github.com/googleapis/googleapis/blob/master/google/home/graph/v1/homegraph.proto
How can one call the Home Graph API RPC endpoint using gRPC on Node.js to multiplex concurrent API method calls over a single connection?
You can use #grpc-js in combination with Application Default Credentials to initialize the credentials.
$ npm install #grpc/grpc-js
$ npm install google-auth-library
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-key.json
// NOTE: the package name is '@grpc/grpc-js'; the original text had the '@'
// mangled into '#', which npm/require cannot resolve.
const grpc = require('@grpc/grpc-js');
const { GoogleAuth } = require('google-auth-library');

/**
 * Build combined gRPC channel credentials: TLS at the channel level plus
 * per-call OAuth2 tokens derived from Application Default Credentials,
 * scoped for the Home Graph API.
 * @returns {Promise<import('@grpc/grpc-js').ChannelCredentials>}
 */
async function getCredentials() {
  const sslCredentials = grpc.credentials.createSsl();
  const googleAuth = new GoogleAuth({
    scopes: 'https://www.googleapis.com/auth/homegraph'
  });
  const authClient = await googleAuth.getClient();
  const callCredentials = grpc.credentials.createFromGoogleCredential(
    authClient
  );
  // Channel (TLS) + call (OAuth) credentials combined into one object.
  return grpc.credentials.combineChannelCredentials(
    sslCredentials,
    callCredentials
  );
}
Use google-proto-files with @grpc/proto-loader to load the Home Graph service protobuf definition with its dependencies.
// Package name is '@grpc/proto-loader' (the '@' was mangled into '#').
const protoLoader = require('@grpc/proto-loader');
const protos = require('google-proto-files');

/**
 * Load the Home Graph v1 service definition shipped in google-proto-files.
 * includeDirs points at the proto root so imports inside homegraph.proto
 * (e.g. google/api annotations) resolve.
 * @returns {Promise<object>} the google.home.graph.v1 package namespace.
 */
async function getHomegraph() {
  const homegraphProto = await protoLoader.load(
    protos.getProtoPath('home/graph', 'v1', 'homegraph.proto'), {
      includeDirs: [protos.getProtoPath('..')]
    }
  );
  return grpc.loadPackageDefinition(homegraphProto).google.home.graph.v1;
}
And finally initialize client stubs to call the HomeGraphApiService methods.
// Entry point: build authenticated channel credentials and the service
// definition, create a HomeGraphApiService stub, then issue a Sync request
// for the given agent user.
(async () => {
  const channelCredentials = await getCredentials();
  const homegraphPackage = await getHomegraph();

  const service = new homegraphPackage.HomeGraphApiService(
    'homegraph.googleapis.com', channelCredentials
  );

  const handleSyncResult = (err, result) => {
    if (err) {
      console.error(err);
    } else {
      console.log(result);
    }
  };

  service.sync({
    agentUserId: 'AGENT_USER_ID'
  }, handleSyncResult);

  // Other methods available on the same stub:
  // service.query();
  // service.requestSyncDevices();
  // service.reportStateAndNotification();
  // service.deleteAgentUser();
})();
Note that by default the Channel implementation will reuse existing channels from a global pool if the parameters (address, credentials and options) are the same. You can alter this behavior with the grpc.use_local_subchannel_pool option.
I have a Cloud Function written in the Node.js v8 that uses the #google-cloud/bigquery v1.3.0 library.
I like it: I'm able to perform BigQuery changes, such as creating a view, using the very simple code below, without worrying about promises — it reads synchronously.
// Create a BigQuery view with the @google-cloud/bigquery client using await.
const bigquery = new BigQuery({projectId: 'my-project'});

const options = {
  view: {
    query: 'SELECT * FROM `my-project.my-datatset.my-table`',
    useLegacySql: false
  }
};

// `results` was an implicit global in the original snippet; declare it.
const results = await bigquery
  .dataset('my-datatset')
  .createTable('my-view', options);
But I've been unable to work out how this code can be modified to perform a patch operations. I would expect a very similar syntax to be available but I can't find it. E.g. none of the examples below work:
//bigquery.dataset(datasetId).patchTable(viewId,options);
//bigquery.dataset(datasetId).table(viewId).patch(options);
//bigquery.dataset(datasetId).tables(viewId).patch(options);
I'm able to do the patch operation I want using the rest API through Googles reference documents. But I just can't find a code solution that's consistent with the approach above.
Any ideas?
This solution is longer and asynchronous, but it seems to work — posting it in case anyone runs into the same problem:
// Patch a BigQuery view's definition via the googleapis REST client —
// the programmatic equivalent of the BigQuery v2 tables.patch REST method.
const {google} = require('googleapis');

const bigQuery = google.bigquery('v2');

google.auth.getApplicationDefault(function(err, authClient) {
  if (err) {
    // The original left this branch empty, silently continuing with an
    // unusable client; log and bail out instead.
    console.error('Failed to acquire application default credentials:', err);
    return;
  }
  // Older auth client types require scopes to be attached explicitly.
  if (authClient.createScopedRequired && authClient.createScopedRequired()) {
    const scopes = [
      //Either scope is sufficient according to the spec.
      //https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
      'https://www.googleapis.com/auth/cloud-platform',
      'https://www.googleapis.com/auth/bigquery'
    ];
    authClient = authClient.createScoped(scopes);
  }
  const request = {
    projectId: 'my-project',
    datasetId: 'my-datatset',
    tableId: 'my-view',
    resource: {
      view: {
        query: 'SELECT * FROM `my-project.my-datatset.my-table`',
        useLegacySql: false
      }
    },
    // Auth client
    auth: authClient
  };
  // `tables` was an implicit global in the original; keep it local.
  const tables = bigQuery.tables;
  tables.patch(request, function(patchErr, response) {
    if (patchErr) {
      console.error(patchErr);
    } else {
      console.log(response);
    }
  });
});
I'm trying to interact with Azure China Resources with Nodejs.
But could not find out how to authenticate the application with Azure China with nodejs SDK.
Tried @azure/arm-storage:
import * as msRestNodeAuth from "#azure/ms-rest-nodeauth";
import { StorageManagementClient, StorageManagementModels, StorageManagementMappers } from "#azure/arm-storage";
const subscriptionId = process.env["AZURE_SUBSCRIPTION_ID"];
msRestNodeAuth.interactiveLogin().then((creds) => {
const client = new StorageManagementClient(creds, subscriptionId);
client.operations.list().then((result) => {
console.log("The result is:");
console.log(result);
});
}).catch((err) => {
console.error(err);
});
To connect to Azure China, you will need to specify the environment. Please see the code below:
// To target Azure China (Mooncake), pass the ChinaCloud environment to
// interactiveLogin; by default the public Azure cloud is used.
// Package scope is '@azure' (mangled into '#' in the original text).
const msRestNodeAuth = require("@azure/ms-rest-nodeauth");
const Environment = require("@azure/ms-rest-azure-env").Environment;

const mooncake = Environment.ChinaCloud;

msRestNodeAuth.interactiveLogin({
  environment: mooncake
}).then((creds) => {
  console.log(creds);
}).catch((err) => {
  console.error(err);
});