I'm using the Node.js BigQuery client library and need to get a list of tables from a dataset without the partition suffix.
For example, I have a number of partitioned tables:
table1_20170101
table1_20170102
...
table1_20170131
table2_20170101
table2_20170102
...
table2_20170131
I need to get [table1,table2] as a result but using getTables method I get [table1_20170101,table1_20170102...]
Script example below:
// List the table IDs in the dataset and send them to the client.
// NOTE(review): `dataset` and `res` are assumed to be in scope (Express-style handler).
dataset.getTables(function (err, tables) {
  if (err) {
    // The original ignored `err`, which would crash on `tables[key].id` below.
    console.error(err);
    res.status(500).send({ error: err.message });
    return;
  }
  // `tables` is an array of Table objects — map it directly instead of
  // iterating with `for...in` (which enumerates array indices as strings).
  const result = tables.map((table) => table.id);
  console.log(result);
  res.send(result);
});
Is there any available method to get "unpartitioned" table names?
Getting all the tables with the _date suffix, splitting the names and de-duplicating them seems very slow if there are a lot of partitioned tables.
You could perform a query against the __TABLES_SUMMARY__ table, instead of using the getTables method.
The sample below gets all the tables in a Dataset, splits the name on the _ character and takes the first part. It then creates a distinct list.
// Query __TABLES_SUMMARY__ for the distinct base names (text before the
// first "_") so the deduplication happens server-side in BigQuery.
bigquery.query({
  query: [
    'SELECT DISTINCT SPLIT(table_id,"_")[ORDINAL(1)] as tableName',
    'FROM `DATASETNAME.__TABLES_SUMMARY__`;'
  ].join(' '),
  params: []
}, function (err, rows) {
  if (err) {
    // The original ignored `err` and would crash iterating undefined rows.
    console.error(err);
    return;
  }
  const result = [];
  // `const row` — the original `for (row of rows)` leaked an implicit global.
  for (const row of rows) {
    result.push(row.tableName);
  }
  console.log(result);
});
You could use a meta query:
select * from `wr_live.__TABLES_SUMMARY__`
Related
I'm programming a website to search in a sqlite database. I'm using node-sqlite3 in my backend, this is my code.
// Run the query and return both the matching rows and the SQL text used.
// NOTE(review): `db` (node-sqlite3 handle) and `res` are assumed in scope.
let query = `SELECT * FROM "main"."SPTOT" WHERE lower("Country") LIKE 'australia' ESCAPE '\\' ORDER BY Country, Operator, "Basic Type", Serial, Date`;
db.all(query, (err, rows) => {
  if (err) throw err;
  // Property shorthand — same payload shape as { rows: rows, query: query }.
  res.json({ rows, query });
});
The result is sorted by date, but it should sort by Country, Operator, BasicType, Serial and then Date.
When I'm using DB Browser for SQLite with the exact same query, the results are sorted like they should be.
From node-sqlite3
From DB Browser
I don't know if this is an issue with node-sqlite3 or something else. If it is an issue in node-sqlite3, is there another way to sort the results the way I want in JavaScript?
So, I created a sorting function to sort it right after running the query and getting the results. I reported this issue to the github repository.
This is the sorting function.
/**
 * Stable multi-key sort: sorts `array` in place by the given object keys,
 * with the first key as the primary sort key.
 *
 * Works by sorting on each key from least- to most-significant and relying
 * on Array.prototype.sort being stable (guaranteed since ES2019).
 *
 * @param {Object[]} array - Rows to sort (sorted in place).
 * @param {string[]} keysInObj - Property names, highest priority first.
 * @returns {Object[]} The same array, sorted.
 */
function sort(array, keysInObj) {
  // Copy before reversing: the original called keysInObj.reverse(), which
  // mutated the caller's key list on every invocation.
  const keys = [...keysInObj].reverse();
  for (const key of keys) {
    // Template literal coerces missing/non-string values; localeCompare
    // coerces its argument the same way.
    array.sort((a, b) => `${a[key]}`.localeCompare(b[key]));
  }
  return array;
}
This is now my code.
// Same query as before, but re-sorted in JavaScript because node-sqlite3
// returned the rows in the wrong order.
let query = `SELECT * FROM "main"."SPTOT" WHERE lower("Country") LIKE 'australia' ESCAPE '\\' ORDER BY Country, Operator, "Basic Type", Serial, Date`;
db.all(query, (err, rows) => {
  if (err) throw err;
  const sorted = sort(rows, ["Country", "Operator", "Basic Type", "Serial", "Date"]);
  res.json({ rows: sorted, query });
});
In a Cosmos DB stored procedure, I'm using a inline sql query to try and retrieve the distinct count of a particular user id.
I'm using the SQL API for my account. I've run the below query in Query Explorer in my Cosmos DB account and I know that I should get a count of 10 (There are 10 unique user ids in my collection):
SELECT VALUE COUNT(1) FROM (SELECT DISTINCT c.UserId FROM root c) AS t
However when I run this in the Stored Procedure portal, I either get 0 records back or 18 records back (total number of documents). The code for my Stored Procedure is as follows:
// Cosmos DB stored procedure: count the distinct UserId values visible
// to this procedure (scoped to the partition key it is executed with).
function GetDistinctCount() {
  const collection = getContext().getCollection();
  const accepted = collection.queryDocuments(
    collection.getSelfLink(),
    'SELECT VALUE COUNT(1) FROM (SELECT DISTINCT c.UserId FROM root c) AS t',
    (err, feed, options) => {
      if (err) throw err;
      const response = getContext().getResponse();
      if (!feed || !feed.length) {
        response.setBody(JSON.stringify({ code: 404, body: "no docs found" }));
      } else {
        // feed[0] is the scalar produced by SELECT VALUE COUNT(1).
        response.setBody(JSON.stringify({ code: 200, body: feed[0] }));
      }
    }
  );
}
After looking at various feedback forums and documentation, I don't think there's an elegant solution for me to do this as simply as it would be in normal SQL.
the UserId is my partition key which I'm passing through in my C# code and when I test it in the portal, so there's no additional parameters that I need to set when calling the Stored Proc. I'm calling this Stored Proc via C# and adding any further parameters will have an effect on my tests for that code, so I'm keen not to introduce any parameters if I can.
Your problem is caused by the fact that you did not set a partition key for your stored procedure.
Please see the statements in the official document:
And this:
So,when you execute a stored procedure under a partitioned collection, you need to pass the partition key param. It's necessary! (Also this case explained this:Documentdb stored proc cross partition query)
Back to your question: you never pass any partition key, which is equivalent to passing a null or "" value for the partition key, so the query outputs no data because no document has a UserId equal to null or "".
My advice:
You could use normal Query SDK to execute your sql, and set the enableCrossPartitionQuery: true which allows you scan entire collection without setting partition key. Please refer to this tiny sample:Can't get simple CosmosDB query to work via Node.js - but works fine via Azure's Query Explorer
So I found a solution that returns the result I need. My stored procedure now looks like this:
// Cosmos DB stored procedure: return the number of distinct UserId values
// by fetching them all in one page (pageSize: -1) and counting the feed.
function GetPaymentCount() {
  const collection = getContext().getCollection();
  const accepted = collection.queryDocuments(
    collection.getSelfLink(),
    'SELECT DISTINCT VALUE(doc.UserId) from root doc',
    { pageSize: -1 },
    (err, feed, options) => {
      if (err) throw err;
      const response = getContext().getResponse();
      if (!feed || !feed.length) {
        response.setBody(JSON.stringify({ code: 404, body: "no docs found" }));
      } else {
        // feed.length is stringified twice on purpose, matching the
        // original payload exactly (body is the string "N", not the number).
        response.setBody(JSON.stringify({ code: 200, body: JSON.stringify(feed.length) }));
      }
    }
  );
}
Essentially, I changed the pageSize parameter to -1 which returned all the documents I knew would be returned in the result. I have a feeling that this will be more expensive in terms of RU/s cost, but it solves my case for now.
If anyone has more efficient alternatives, please comment and let me know.
I have the following node js code that should list all items from a DynamoDB table,
import * as dynamoDbLib from "../../libs/dynamodb-lib";
import { success, failure } from "../../libs/response-lib";
/**
 * Lambda handler: return every item in the "brands" table.
 *
 * Uses a Scan instead of a Query: Query requires an exact partition-key
 * value, which is why filtering on an empty (or regex) brandId returned
 * nothing. Note that Scan reads the whole table and paginates at 1 MB —
 * for large tables, follow result.LastEvaluatedKey for remaining pages.
 */
export async function main(event, context) {
  const params = {
    TableName: "brands",
  };
  try {
    const result = await dynamoDbLib.call("scan", params);
    return success(result.Items);
  } catch (e) {
    console.log(e);
    return failure({ status: false });
  }
}
The id is in uuid format which when inserted from my node js was imported by using:
import uuid from "uuid";
then inserted to the table like:
brandId: uuid.v1()
Now when I query the items in the table I can only get only one record if and only if I hard coded the uuid of a record in the expression attribute value (either the KeyConditions or KeyConditionExpression parameter must be specified). So I thought about adding a regular expression to match all the uuids, my regular expression was copied from some solutions on the web but it didn't work, it was like the following:
[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}
and
\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b
and I have tried other examples but non of them worked, is it right to add a regular expression to get all the items, and if so what is the right regex for it?
Use a Scan operation to get all items in a table.
From the AWS Developer Guide:
The scan method reads every item in the table and returns all the data in the table. You can provide an optional filter_expression, so that only the items matching your criteria are returned. However, the filter is applied only after the entire table has been scanned.
I am working with NodeJS on Google App Engine with the Datastore database.
Note that this question is an extension of (not a duplicate) my original post.
Due to the fact that Datastore does not have support the OR operator, I need to run multiple queries and combine the results.
Here is my approach (based on the selected answer from my original post):
Use Keys-only queries in the 1st stage
Perform the combination of the keys obtained into a single list (including deduplication)
Obtaining the entities simply by key lookup
I have achieved #1 by running two separate queries with the async parallel module. I need help with step 2.
Question: How to combine (union) two lists of entity keys into a single list (including de-duplication) efficiently?
The code I have below successfully performs both the queries and returns two objects getEntities.requesterEntities and getEntities.dataownerEntities.
//Requirement: Get entities for Transfer Requests that match either the Requester OR the Dataowner
// Requirement: get TransferRequest entities matching either the requester
// OR the dataowner. Datastore has no OR operator, so run both keys-only
// queries in parallel and combine afterwards.
async.parallel({
  requesterEntities: (done) => {
    getEntitiesByUsername('TransferRequest', 'requester_username', loggedin_username, (treqs) => {
      done(null, treqs);
    });
  },
  dataownerEntities: (done) => {
    getEntitiesByUsername('TransferRequest', 'dataowner_username', loggedin_username, (treqs) => {
      done(null, treqs);
    });
  }
}, (err, getEntities) => {
  console.log(getEntities.requesterEntities);
  console.log(getEntities.dataownerEntities);
  //***HOW TO COMBINE (UNION) BOTH OBJECTS CONTAINING DATASTORE KEYS?***//
});
/**
 * Runs a keys-only Datastore query for `kind` filtered on
 * `property_type` == `loggedin_username` and passes the entities to
 * `getEntitiesCallback`. On error, logs and invokes nothing.
 */
function getEntitiesByUsername(kind, property_type, loggedin_username, getEntitiesCallback) {
  // Build the query; filter/select mutate the query object in place.
  const query = datastore.createQuery(kind);
  query.filter(property_type, loggedin_username);
  query.select('__key__');
  datastore.runQuery(query, (err, entities) => {
    if (err) {
      console.log('Transfer Request History JSON unable to return data results for Transfer Request. Error message: ', err);
      return;
    }
    getEntitiesCallback(entities);
  });
}
I was able to combine the two separate sets of entity keys by iterating over the arrays and comparing the ID values for each entity key and creating a new array with the unique keys.
Note: The complete solution is posted as an answer to my original post.
//Union of two arrays of objects entity keys
/**
 * Union of two arrays of Datastore entities (keys-only query results),
 * de-duplicated by entity key id. Entities present in both inputs are
 * kept from arr2, matching the original behavior.
 *
 * @param {Object[]} arr1 - Entities from the first query.
 * @param {Object[]} arr2 - Entities from the second query.
 * @returns {Object[]} arr1-only entities followed by all of arr2.
 */
function unionEntityKeys(arr1, arr2) {
  // Index arr2's ids once: O(n + m) instead of the original O(n * m)
  // nested scan. Ids are normalized to String to preserve the original
  // loose (==) comparison semantics.
  const idsInArr2 = new Set(arr2.map((entity) => String(entity[datastore.KEY]['id'])));
  const uniqueToArr1 = arr1.filter(
    (entity) => !idsInArr2.has(String(entity[datastore.KEY]['id']))
  );
  return uniqueToArr1.concat(arr2);
}
I am new to Cassandra, so I might be missing something very simple.
I started off with a simple nodejs application that retrieves and displays all rows from a columnfamily. And if I run the following:
// Connect and stream all tweets to the socket.
// NOTE(review): `pool` (helenus) and `socket` are assumed to be in scope.
pool.connect(function (err, keyspace) {
  if (err) {
    throw err;
  }
  pool.cql("SELECT * FROM tweets", function (err, results) {
    console.log(err, results);
    if (err) {
      // The original emitted regardless of error, sending undefined results.
      return;
    }
    // `const` — the original `datatext = results` created an implicit global.
    const datatext = results;
    socket.emit('tweets', datatext);
  });
});
All I get are the data for the first two columns, which are indexed. No data from other columns are shown. Whereas if I login to cassandra-cli and do a list tweets, I see data from all columns.
Any idea why this may be happening?
Which version of CQL are you using, and what is your table structure?
maybe you can try this:
// Walk the result set: helenus rows expose their columns through
// row.forEach(name, value, timestamp, ttl).
results.forEach((row) => {
  row.forEach((name, value, ts, ttl) => {
    console.log(name, value, ts, ttl);
  });
});