Azure heat map not rendered from GeoJSON data source

I am new to Azure Maps and am trying to generate a weighted heat map to show population in particular areas. I am using the following code. If I use my GeoJSON as the data source, no heat map layer is rendered; if I use the earthquake URL instead, I can see the heat map layer.
map.events.add('ready', function () {
map.events.add('load', function (e) {
datasource = new atlas.source.DataSource();
map.sources.add(datasource);
//datasource.importDataFromUrl('https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.geojson');
alert(geoJson);
datasource.add(geoJson);
map.layers.add(new atlas.layer.HeatMapLayer(datasource, null, {
radius: 50,
color: [
'interpolate',
['linear'],
['heatmap-density'],
0, 'rgba(33,102,172,0)',
0.2, 'rgb(103,169,207)',
0.4, 'rgb(209,229,240)',
0.6, 'rgb(253,219,199)',
0.8, 'rgb(239,138,98)',
1, 'rgb(178,24,43)'
]
}), 'labels');
});
});
My GeoJSON is as below:
{
"type":"FeatureCollection",
"features":[
{
"type":"Feature",
"properties":{
"density":"50"
},
"geometry":{
"type":"Point",
"coordinates":[
51.5570726284386,
25.3115021617515
]
}
},
{
"type":"Feature",
"properties":{
"density":"50"
},
"geometry":{
"type":"Point",
"coordinates":[
51.5570726284386,
25.3115021617515
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
51.5570726284386,
25.391562807081
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
51.5570726284386,
25.391562807081
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
35.4343604091702,
33.9136459680463
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
35.5220012295491,
33.8847298539905
]
}
},
{
"type":"Feature",
"properties":{
"density":"40"
},
"geometry":{
"type":"Point",
"coordinates":[
51.4729695383047,
25.2856697056661
]
}
},
{
"type":"Feature",
"properties":{
"density":"20"
},
"geometry":{
"type":"Point",
"coordinates":[
35.4343604091702,
33.7574366679259
]
}
},
{
"type":"Feature",
"properties":{
"density":"50"
},
"geometry":{
"type":"Point",
"coordinates":[
35.4343604091702,
33.7574366679259
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
51.4729695383047,
25.2856697056661
]
}
},
{
"type":"Feature",
"properties":{
"density":"10"
},
"geometry":{
"type":"Point",
"coordinates":[
35.5220012295491,
33.8847298539905
]
}
}
]
}
Can anyone please check and advise if there's anything wrong in the code or the geoJson?
Thanks.

Using the code you provided and the sample data, I'm able to see the heat map render. If you are adding more code to take your density property into account, for example with an option like this in your heat map layer:
weight: ['get', 'density']
you might not see much difference, since your density properties are strings rather than numbers.
That said, your dataset is really small, so heat maps won't look like much to begin with.
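If you can't change the density values to numbers in the data itself, one possible workaround, assuming the to-number type expression that Azure Maps data-driven style expressions support, is to convert the string inside the weight option (a sketch, not tested against your data):
map.layers.add(new atlas.layer.HeatMapLayer(datasource, null, {
    radius: 50,
    // Convert the string "density" property to a number before using it as the weight.
    weight: ['to-number', ['get', 'density']]
}), 'labels');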

I was not parsing the GeoJSON string; when I changed the following line, it worked.
datasource.add(JSON.parse(geoJson));

PyMongo get analytics by field for given query

I have documents in MongoDB like:
doc = {
    name: "MyName",
    tags: "tag1, tag2, tag3",
    ...
}
When I search for documents by name, I also want to get analytics of the tags for the documents with that name, like:
{
tag1: 7,
tag2: 5,
...
tagn: 14
}
How can I aggregate it?
The data model complicates the query somewhat and the required output format complicates it even more ... but here's one way to do it.
db.collection.aggregate([
{
"$set": {
"tags": {
"$split": ["$tags", ","]
}
}
},
{"$unwind": "$tags"},
{
"$set": {
"tags": {
"$trim": {"input": "$tags"}
}
}
},
{
"$group": {
"_id": "$tags",
"count": {"$count": {}}
}
},
{
"$sort": {"_id": 1}
},
{
"$group": {
"_id": null,
"newRoot": {
"$mergeObjects": {
"$arrayToObject": [
[
{
"$reduce": {
"input": {"$objectToArray": "$$ROOT"},
"initialValue": {},
"in": {
"$mergeObjects": [
"$$value",
{
"$switch": {
"branches": [
{
"case": {"$eq": ["$$this.k", "_id"]},
"then": {"k": "$$this.v"}
},
{
"case": {"$eq": ["$$this.k", "count"]},
"then": {"v": "$$this.v"}
}
],
"default": "$$value"
}
}
]
}
}
}
]
]
}
}
}
},
{"$replaceWith": "$newRoot"}
])
Example output:
[
{
"tag1": 2,
"tag2": 2,
"tag3": 3,
"tag5": 1,
"tag7": 1
}
]
Try it on mongoplayground.net.
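If you are on MongoDB 4.2 or newer, a somewhat shorter sketch of the same idea is to $push {k, v} pairs after counting and hand them straight to $arrayToObject; the $match stage on name is an assumption taken from the question ("when I search documents by name"):
db.collection.aggregate([
  {"$match": {"name": "MyName"}},
  {"$set": {"tags": {"$split": ["$tags", ","]}}},
  {"$unwind": "$tags"},
  {"$set": {"tags": {"$trim": {"input": "$tags"}}}},
  {"$group": {"_id": "$tags", "count": {"$sum": 1}}},
  {"$sort": {"_id": 1}},
  {"$group": {"_id": null, "kv": {"$push": {"k": "$_id", "v": "$count"}}}},
  {"$replaceWith": {"$arrayToObject": "$kv"}}
])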

Multiple updates for an array with $push

Consider the following document skeleton:
{
_id: "615749dce3438547adfff9bc",
items: [
{
type: "shirt",
color: "red",
sizes: [
{
label: "medium",
stock: 10,
price: 20,
},
{
label: "large",
stock: 30,
price: 40,
}
]
},
{
type: "shirt",
color: "green",
sizes: [
{
label: "small",
stock: 5,
price: 3,
},
{
label: "medium",
stock: 5,
price: 3,
},
]
}
]
}
When a new item comes in, I want to insert a new element into items, unless an item already exists with the same type and color as the new one; in that case I only want to merge sizes into that existing item's sizes.
sizes does not have to be unique.
I tried to use $push with upsert: true and arrayFilters, but apparently $push ignores arrayFilters.
I'm using Node with the mongodb package.
Query 1
Filter to see if the item exists.
If it exists, $map over the array to update it; else add it at the end.
*2 array reads, but still faster than Query 2
Test code here
db.collection.update({},
[
{
"$set": {
"newitem": {
"type": "shirt",
"color": "red",
"sizes": [
{
"label": "medium"
}
]
}
}
},
{
"$set": {
"found": {
"$ne": [
{
"$filter": {
"input": "$items",
"cond": {
"$and": [
{
"$eq": [
"$$this.type",
"$newitem.type"
]
},
{
"$eq": [
"$$this.color",
"$newitem.color"
]
}
]
}
}
},
[]
]
}
}
},
{
"$set": {
"items": {
"$cond": [
{
"$not": [
"$found"
]
},
{
"$concatArrays": [
"$items",
[
"$newitem"
]
]
},
{
"$map": {
"input": "$items",
"in": {
"$cond": [
{
"$and": [
{
"$eq": [
"$$this.type",
"$newitem.type"
]
},
{
"$eq": [
"$$this.color",
"$newitem.color"
]
}
]
},
{
"$mergeObjects": [
"$$this",
{
"sizes": {
"$concatArrays": [
"$$this.sizes",
"$newitem.sizes"
]
}
}
]
},
"$$this"
]
}
}
}
]
}
}
},
{
"$unset": [
"found",
"newitem"
]
}
])
Query 2
(alternative solution)
$reduce over the array and do the update.
If the item is found, keep the updated array; else add the new item at the end.
*1 array read (but $concatArrays is slow for big arrays, >500 members; if you have big arrays, use Query 1)
*This would be the normal way to do it if we had a fast way to append to the end of an array, but we don't, so Query 1 is faster.
Test code here
db.collection.update({},
[
{
"$set": {
"newitem": {
"type": "shirt",
"color": "red",
"sizes": [
{
"label": "medium"
}
]
}
}
},
{
"$set": {
"items-found": {
"$reduce": {
"input": "$items",
"initialValue": {
"items": [],
"found": null
},
"in": {
"$cond": [
{
"$and": [
{
"$eq": [
"$$value.found",
null
]
},
{
"$eq": [
"$$this.type",
"$newitem.type"
]
},
{
"$eq": [
"$$this.color",
"$newitem.color"
]
}
]
},
{
"items": {
"$concatArrays": [
"$$value.items",
[
{
"$mergeObjects": [
"$$this",
{
"sizes": {
"$concatArrays": [
"$$this.sizes",
"$newitem.sizes"
]
}
}
]
}
]
]
},
"found": true
},
{
"items": {
"$concatArrays": [
"$$value.items",
[
"$$this"
]
]
},
"found": "$$value.found"
}
]
}
}
}
}
},
{
"$set": {
"items": {
"$cond": [
"$items-found.found",
"$items-found.items",
{
"$concatArrays": [
"$items-found.items",
[
"$newitem"
]
]
}
]
}
}
},
{
"$unset": [
"items-found",
"newitem"
]
}
])
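Since the question mentions Node with the mongodb package, here is a minimal sketch of how either pipeline could be run from the driver. The database and collection names are assumptions, query1Pipeline stands for the stage array of Query 1 pasted in verbatim, and pipeline-style updates need MongoDB 4.2+:
const { MongoClient } = require("mongodb");

async function mergeNewItem(query1Pipeline) {
  const client = new MongoClient("mongodb://localhost:27017");
  try {
    await client.connect();
    const coll = client.db("shop").collection("items");   // assumed names
    // Pipeline updates take an array of stages instead of an update document.
    // The empty filter mirrors the update({}, ...) used above; in practice you
    // would filter down to the specific document you want to modify.
    const result = await coll.updateMany({}, query1Pipeline);
    console.log(`matched ${result.matchedCount}, modified ${result.modifiedCount}`);
  } finally {
    await client.close();
  }
}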

Mongo Query to get a result

My mongo collection is named tests and it has the following documents in it:
[
{
"title": "One",
"uid": "1",
"_metadata": {
"references": [
{
"uid": "2"
},
{
"asssetuid": 10
}
]
}
},
{
"title": "Two",
"uid": "2",
"_metadata": {
"references": [
{
"uid": "3"
},
{
"asssetuid": 11
}
]
}
},
{
"title": "Three",
"uid": "3",
"_metadata": {
"references": []
}
}
]
And I want the result in the following format (for uid:1)
[
{
"title": "One",
"uid": 1,
"_metadata": {
"references": [
{
"asssetuid": 10
},
{
"asssetuid": 11
},
{
"title": "Two",
"uid": "2",
"_metadata": {
"references": [
{
"title": "Three",
"uid": "3"
}
]
}
}
]
}
}
]
For uid: 2 I want the following result:
[
{
"title": "Two",
"uid": 2,
"_metadata": {
"references": [
{
"asssetuid": 11
},
{
"title": "Three",
"uid": "3"
}
]
}
}
]
Which query should I use here to get the expected result according to its uid? I want the result in a parent-child relationship. Is this possible using MongoDB's $graphLookup or any other query that we can use to get the result? Please help me with this.
New type of output:
[{
"title": "One",
"uid": 1,
"_metadata": {
"assets": [{
"asssetuid": 10,
"parent": 1
}, {
"asssetuid": 11,
"parent": 2
}],
"entries": [{
"title": "Two",
"uid": "2",
"parent": 1
}, {
"title": "Three",
"uid": "3",
"parent": 2
}]
}
}]
Mongo supports automatic reference resolution using $ref, but for that you need to change your schema a little, and reference resolution is only supported by some drivers.
You need to store your data in this format:
[
...
{
"_id": ObjectId("5a934e000102030405000000"),
"_metadata": {
"references": [
{
"$ref": "collection",
"$id": ObjectId("5a934e000102030405000001"),
"$db": "database"
},
{
"asssetuid": 10
}
]
},
"title": "One",
"uid": "1"
},
....
]
For more details on $ref, refer to the official documentation: label-document-references
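Note that most drivers do not dereference $ref automatically, so you typically resolve it yourself. A minimal sketch for the Node driver, assuming the reference is read back either as a plain { $ref, $id, $db } object or as a DBRef instance exposing collection / oid / db properties:
async function resolveReference(client, ref) {
  // Support both the raw { $ref, $id, $db } shape and a deserialized DBRef instance.
  const dbName = ref.$db || ref.db;
  const collName = ref.$ref || ref.collection;
  const id = ref.$id || ref.oid;
  return client.db(dbName).collection(collName).findOne({ _id: id });
}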
OR
You can resolve the references using $graphLookup, but the only problem with $graphLookup is that you will lose the asssetuid. Here is the query; it will resolve the references and give the output as a flat map:
db.collection.aggregate([
{
$match: {
uid: "1"
}
},
{
$graphLookup: {
from: "collection",
startWith: "$_metadata.references.uid",
connectFromField: "_metadata.references.uid",
connectToField: "uid",
depthField: "depth",
as: "resolved"
}
},
{
"$addFields": {
"references": "$resolved",
"metadata": [
{
"_metadata": "$_metadata"
}
]
}
},
{
"$project": {
"references._metadata": 0,
}
},
{
"$project": {
"references": "$references",
"merged": {
"$concatArrays": [
"$metadata",
"$resolved"
]
}
}
},
{
"$project": {
results: [
{
merged: "$merged"
},
{
references: "$references"
}
]
}
},
{
"$unwind": "$results"
},
{
"$facet": {
"assest": [
{
"$match": {
"results.merged": {
"$exists": true
}
}
},
{
"$unwind": "$results.merged"
},
{
"$unwind": "$results.merged._metadata.references"
},
{
"$match": {
"results.merged._metadata.references.asssetuid": {
"$exists": true
}
}
},
{
"$project": {
_id: 0,
"asssetuid": "$results.merged._metadata.references.asssetuid"
}
}
],
"uid": [
{
"$match": {
"results.references": {
"$exists": true
}
}
},
{
"$unwind": "$results.references"
},
{
$replaceRoot: {
newRoot: "$results.references"
}
}
]
}
},
{
"$project": {
"references": {
"$concatArrays": [
"$assest",
"$uid"
]
}
}
}
])
Here is the link to the playground to test it: Mongo Playground

How do I calculate a percentage on an array field using MongoDB aggregation?

I have following documents
users = [
{
type: 'A',
name: 'anil',
logins: [
{ at: '2-3-2019', device: 'mobile' },
{ at: '3-3-2019', device: 'desktop' },
{ at: '4-3-2019', device: 'tab' },
{ at: '5-3-2019', device: 'mobile' }
]
},
{
type: 'A',
name: 'rakesh',
logins: [
{ at: '2-3-2019', device: 'desktop' },
{ at: '3-3-2019', device: 'mobile' },
{ at: '4-3-2019', device: 'desktop' },
{ at: '5-3-2019', device: 'tab' }
]
},
{
type: 'A',
name: 'rahul',
logins: [
{ at: '2-3-2019', device: 'tab' },
{ at: '3-3-2019', device: 'mobile' },
{ at: '4-3-2019', device: 'tab' },
{ at: '5-3-2019', device: 'tab' }
]
}
]
I need to calculate the percentage for each device used by every user of type "A".
If we look at user anil, the device usage is:
mobile: 50%
desktop: 25%
tab: 25%
The highest usage is the mobile device with 50%, so mobile should be considered as his device.
Like the above, the final output would be:
[
{
name: 'anil',
device: 'mobile',
logins: 50%
},
{
name: 'rakesh',
device: 'desktop',
logins: 50%
},
{
name: 'rahul',
device: 'tab',
logins: 75%
}
]
Thanks for any help.
You can use the aggregation below.
The overall logic here is to find the distinct devices inside the array; after that, all you need to do is $map over them and calculate the percentage for each device using the formula
(numberOfDevices * 100) / total size of the logins array
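For example, anil has 4 logins and 2 of them are from mobile, so mobile = (2 * 100) / 4 = 50%.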
It will be easier to understand the aggregation below if you remove its stages one by one and look at the intermediate output.
db.collection.aggregate([
{ "$match": { "type": "A" }},
{ "$addFields": {
"logins": {
"$arrayToObject": {
"$map": {
"input": { "$setUnion": ["$logins.device"] },
"as": "m",
"in": {
"k": "$$m",
"v": {
"$divide": [
{
"$multiply": [
{ "$size": {
"$filter": {
"input": "$logins",
"as": "d",
"cond": {
"$eq": ["$$d.device", "$$m"]
}
}
}},
100
]
},
{ "$size": "$logins" }
]
}
}
}
}
}
}}
])
MongoPlayground
[
{
"_id": ObjectId("5a934e000102030405000000"),
"logins": {
"desktop": 25,
"mobile": 50,
"tab": 25
},
"name": "anil",
"type": "A"
},
{
"_id": ObjectId("5a934e000102030405000001"),
"logins": {
"desktop": 50,
"mobile": 25,
"tab": 25
},
"name": "rakesh",
"type": "A"
},
{
"_id": ObjectId("5a934e000102030405000002"),
"logins": {
"mobile": 25,
"tab": 75
},
"name": "rahul",
"type": "A"
}
]
Exact output: here I have just picked the $max element from the array of objects after computing the percentage for all devices.
db.collection.aggregate([
{ "$match": { "type": "A" }},
{ "$addFields": {
"logins": {
"$map": {
"input": { "$setUnion": ["$logins.device"] },
"as": "m",
"in": {
"k": "$$m",
"v": {
"$divide": [
{
"$multiply": [
{ "$size": {
"$filter": {
"input": "$logins",
"as": "d",
"cond": {
"$eq": ["$$d.device", "$$m"]
}
}
}},
100
]
},
{ "$size": "$logins" }
]
}
}
}
}
}},
{ "$replaceRoot": {
"newRoot": {
"$mergeObjects": [
"$$ROOT",
{
"$arrayElemAt": [
"$logins",
{
"$indexOfArray": [
"$logins.v",
{ "$max": "$logins.v" }
]
}
]
}
]
}
}},
{ "$project": { "logins": 0 }}
])
MongoPlayground
[
{
"_id": ObjectId("5a934e000102030405000000"),
"k": "mobile",
"name": "anil",
"type": "A",
"v": 50
},
{
"_id": ObjectId("5a934e000102030405000001"),
"k": "desktop",
"name": "rakesh",
"type": "A",
"v": 50
},
{
"_id": ObjectId("5a934e000102030405000002"),
"k": "tab",
"name": "rahul",
"type": "A",
"v": 75
}
]
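The part of the second pipeline that picks the most-used device is the $arrayElemAt / $indexOfArray / $max combination. In isolation (a sketch, assuming logins has already been mapped into {k, v} pairs as above) it looks like this:
db.collection.aggregate([
  { "$project": {
    "top": {
      "$arrayElemAt": [
        "$logins",
        // $max finds the highest percentage, $indexOfArray finds its position,
        // and $arrayElemAt pulls out that { k, v } pair.
        { "$indexOfArray": ["$logins.v", { "$max": "$logins.v" }] }
      ]
    }
  }}
])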

Return multiple objects with $group (aggregation)

I have a collection similar to this:
[
{
"_id":1,
"name":"breakfast",
"time":"10.00"
},
{
"_id":3,
"name":"lunch",
"time":"12.07"
},
{
"_id":2,
"name":"breakfast",
"time":"10.10"
},
{
"_id":4,
"name":"lunch",
"time":"12.45"
}
]
I want to aggregate into something like this:
{
"breakfast":[
{
"_id":1,
"name":"breakfast",
"time":"10.00"
},
{
"_id":2,
"name":"breakfast",
"time":"10.10"
}
],
"lunch":[
{
"_id":3,
"name":"lunch",
"time":"12.07"
},
{
"_id":4,
"name":"lunch",
"time":"12.45"
}
]
}
I have only managed to group them, but I can't change the key meals to either breakfast or lunch depending on the meal name (the group key).
$group: {
_id: { meal: '$meal.name' },
meals: { $push: '$meal' },
}
Using the above code I have managed to produce the output below. My only challenge is changing the key meals to either breakfast or lunch as explained above in the subgroups.
{
"meals":[
{
"_id":1,
"name":"breakfast",
"time":"10.00"
},
{
"_id":2,
"name":"breakfast",
"time":"10.10"
}
],
"meals":[
{
"_id":3,
"name":"lunch",
"time":"12.07"
},
{
"_id":4,
"name":"lunch",
"time":"12.45"
}
]
}
Here you can have your answer.
After grouping to $push into an array, you similarly $push all that content into an array keyed by the "name" grouping key, and then convert it into the keys of a document in a $replaceRoot with $arrayToObject:
db.collection.aggregate([
{ "$group": {
"_id": "$name",
"data": { "$push": "$$ROOT" }
}},
{ "$group": {
"_id": null,
"data": {
"$push": {
"k": "$_id",
"v": "$data"
}
}
}},
{ "$replaceRoot": {
"newRoot": { "$arrayToObject": "$data" }
}}
])
OUTPUT
[
{
"breakfast": [
{
"_id": 1,
"name": "breakfast",
"time": "10.00"
},
{
"_id": 2,
"name": "breakfast",
"time": "10.10"
}
],
"lunch": [
{
"_id": 3,
"name": "lunch",
"time": "12.07"
},
{
"_id": 4,
"name": "lunch",
"time": "12.45"
}
]
}
]
You can check the result of the above query at this LINK
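To see why the final $replaceRoot works, it helps to look at what the second $group emits: a single document whose data array is already in the k/v shape that $arrayToObject expects, roughly:
{
  "_id": null,
  "data": [
    { "k": "breakfast", "v": [{ "_id": 1, "name": "breakfast", "time": "10.00" }, { "_id": 2, "name": "breakfast", "time": "10.10" }] },
    { "k": "lunch", "v": [{ "_id": 3, "name": "lunch", "time": "12.07" }, { "_id": 4, "name": "lunch", "time": "12.45" }] }
  ]
}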
