pyspark error: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.newAPIHadoopRDD

When I try to query Elasticsearch from Spark, an error occurs.
The code I use is the following:
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark import SparkConf
from pyspark.sql import SparkSession
body2 = """{
  "query": {
    "bool": {
      "must": [
        {
          "range": {
            "@timestamp": {
              "lte": "2022-05-05T09:25:15.000-03:00",
              "gte": "2022-05-04T09:25:15.000-03:00"
            }
          }
        },
        {
          "match": {
            "type.keyword": "TABLA1"
          }
        }
      ]
    }
  },
  "size": 10
}"""
sconf = SparkConf().setAppName("Test")
sc = SparkContext.getOrCreate(sconf);
es_read_conf = {
    "es.nodes": "10.48.17.67",
    "es.port": "9200",
    "es.query": body2,
    "es.resource": "Incice1/TABLA1",
    "es.nodes.wan.only": "true",
    "es.net.http.auth.user": "user1",
    "es.net.http.auth.pass": "Askjd56jd8"
}
es_rdd = sc.newAPIHadoopRDD(
    inputFormatClass="org.elasticsearch.hadoop.mr.EsInputFormat",
    keyClass="org.apache.hadoop.io.NullWritable",
    valueClass="org.elasticsearch.hadoop.mr.LinkedMapWritable",
    conf=es_read_conf)
The error that I get when I execute the code in pyspark:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/spark/python/pyspark/context.py", line 859, in newAPIHadoopRDD
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
File "/opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/java_gateway.py", line 1321, in __call__
File "/opt/spark/python/pyspark/sql/utils.py", line 111, in deco
return f(*a, **kw)
File "/opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/protocol.py", line 326, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.newAPIHadoopRDD.
: java.lang.ClassNotFoundException: org.elasticsearch.hadoop.mr.LinkedMapWritable
at java.base/java.net.URLClassLoader.findClass(URLClassLoader.java:476)
at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:589)
at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:522)
at java.base/java.lang.Class.forName0(Native Method)
at java.base/java.lang.Class.forName(Class.java:398)
at org.apache.spark.util.Utils$.classForName(Utils.scala:216)
(...)
The same query works from the Elasticsearch Dev Tools console, but when I run it from pyspark I get this error.
Thanks in advance for any help.
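Note: a ClassNotFoundException for org.elasticsearch.hadoop.mr.LinkedMapWritable generally means the elasticsearch-hadoop connector jar is not on the Spark classpath. A minimal sketch of one way to supply it (the jar path and version below are assumptions; use the connector that matches your Elasticsearch version):

from pyspark import SparkConf, SparkContext

# Hypothetical local path to the elasticsearch-hadoop connector jar.
ES_HADOOP_JAR = "/opt/jars/elasticsearch-hadoop-7.17.0.jar"

# spark.jars must be set before the SparkContext is created so the jar
# is shipped to both the driver and the executors.
sconf = (
    SparkConf()
    .setAppName("Test")
    .set("spark.jars", ES_HADOOP_JAR)
)
sc = SparkContext.getOrCreate(sconf)

# Equivalent when launching from the shell:
#   pyspark --jars /opt/jars/elasticsearch-hadoop-7.17.0.jar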

Related

Retrieve data from Tuya sdk

I am following this guide to retrieve data from my Fingerbot: https://www.home-assistant.io/integrations/tuya/
I also use this repo to extract the data:
https://github.com/redphx/tuya-local-key-extractor
When I run the extract.py file I get this error:
Exception in thread Thread-1:
Traceback (most recent call last):
openapi <tuya_iot.openapi.TuyaOpenAPI object at 0x7f52fff924c0>
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
openmq <TuyaOpenMQ(Thread-1, started 139994399807232)>
self.run()
File "/home/xavier/.local/lib/python3.8/site-packages/tuya_iot/openmq.py", line 158, in run
self.__run_mqtt()
File "/home/xavier/.local/lib/python3.8/site-packages/tuya_iot/openmq.py", line 164, in __run_mqtt
mq_config = self._get_mqtt_config()
File "/home/xavier/.local/lib/python3.8/site-packages/tuya_iot/openmq.py", line 67, in _get_mqtt_config
"uid": self.api.token_info.uid,
AttributeError: 'NoneType' object has no attribute 'uid'
Traceback (most recent call last):
File "./extract.py", line 41, in <module>
device_manager.update_device_list_in_smart_home()
File "/home/xavier/.local/lib/python3.8/site-packages/tuya_iot/device.py", line 239, in update_device_list_in_smart_home
response = self.api.get(f"/v1.0/users/{self.api.token_info.uid}/devices")
AttributeError: 'NoneType' object has no attribute 'uid'
Here is the code I use:
import json
import logging
import os

from config import (
    ENDPOINT,
    APP,
    EMAIL,
    PASSWORD,
    ACCESS_ID,
    ACCESS_KEY,
)
from tuya_iot import (
    TuyaOpenAPI,
    AuthType,
    TuyaOpenMQ,
    TuyaDeviceManager,
)

openapi = TuyaOpenAPI(ENDPOINT, ACCESS_ID, ACCESS_KEY, AuthType.SMART_HOME)
openapi.connect(EMAIL, PASSWORD, country_code=84, schema=APP.value)
openmq = TuyaOpenMQ(openapi)
openmq.start()

print('openapi {}'.format(openapi))
print('openmq {}'.format(openmq))

device_manager = TuyaDeviceManager(openapi, openmq)
device_manager.update_device_list_in_smart_home()

devices = []
for tuya_device in device_manager.device_map.values():
    device = {
        'device_id': tuya_device.id,
        'device_name': tuya_device.name,
        'product_id': tuya_device.product_id,
        'product_name': tuya_device.product_name,
        'category': tuya_device.category,
        'uuid': tuya_device.uuid,
        'local_key': tuya_device.local_key,
    }
    try:
        resp = openapi.get('/v1.0/iot-03/devices/factory-infos?device_ids={}'.format(tuya_device.id))
        factory_info = resp['result'][0]
        if 'mac' in factory_info:
            mac = ':'.join(factory_info['mac'][i:i + 2] for i in range(0, 12, 2))
            device['mac_address'] = mac
    except Exception as e:
        print(e)
    devices.append(device)

print(json.dumps(devices, indent=2))
os._exit(0)
I am on Ubuntu 20.04 and I installed the Python SDK with this command:
pip3 install tuya-iot-py-sdk
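Note: the AttributeError on token_info.uid means openapi.connect() did not produce a token, so token_info is still None when TuyaOpenMQ and TuyaDeviceManager try to use it (usually a failed login: wrong endpoint, credentials, country code or schema). A minimal sketch of a fail-fast check, assuming connect() returns the raw API response dict as in the SDK examples:

openapi = TuyaOpenAPI(ENDPOINT, ACCESS_ID, ACCESS_KEY, AuthType.SMART_HOME)
response = openapi.connect(EMAIL, PASSWORD, country_code=84, schema=APP.value)

# If the login failed, token_info stays None and every later
# token_info.uid access raises AttributeError.
if openapi.token_info is None:
    raise SystemExit('Tuya login failed, check credentials/endpoint: {}'.format(response))

openmq = TuyaOpenMQ(openapi)
openmq.start()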

Airflow DataprocSubmitJobOperator - ValueError: Protocol message Job has no "python_file_uris" field

I'm using the DataprocSubmitJobOperator in Airflow to schedule pyspark jobs, but I'm unable to pass pyfiles to the pyspark job.
Here is the code I'm using:
DAG
# working - passing jars
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [
            "gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar",
            "gs://dataproc-spark-jars/bson-4.0.5.jar",
            "gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar",
            "gs://dataproc-spark-jars/mongodb-driver-core-4.0.5.jar",
            "gs://dataproc-spark-jars/mongodb-driver-sync-4.0.5.jar",
            "gs://dataproc-spark-jars/spark-avro_2.12-3.1.2.jar",
            "gs://dataproc-spark-jars/spark-bigquery-with-dependencies_2.12-0.23.2.jar",
            "gs://dataproc-spark-jars/spark-token-provider-kafka-0-10_2.12-3.1.3.jar",
            "gs://dataproc-spark-jars/htrace-core4-4.1.0-incubating.jar",
            "gs://dataproc-spark-jars/hadoop-client-3.3.1.jar",
            "gs://dataproc-spark-jars/spark-sql-kafka-0-10_2.12-3.1.3.jar",
            "gs://dataproc-spark-jars/hadoop-client-runtime-3.3.1.jar",
            "gs://dataproc-spark-jars/hadoop-client-3.3.1.jar",
            "gs://dataproc-spark-jars/kafka-clients-3.2.0.jar",
            "gs://dataproc-spark-jars/commons-pool2-2.11.1.jar",
        ],
        "file_uris": [
            "gs://kafka-certs/versa-kafka-gke-ca.p12",
            "gs://kafka-certs/syslog-vani.p12",
            "gs://kafka-certs/alarm-compression-user.p12",
            "gs://kafka-certs/appstats-user.p12",
            "gs://kafka-certs/insights-user.p12",
            "gs://kafka-certs/intfutil-user.p12",
            "gs://kafka-certs/reloadpred-chkpoint-user.p12",
            "gs://kafka-certs/reloadpred-user.p12",
            "gs://dataproc-spark-configs/topic-customer-map.cfg",
            "gs://dataproc-spark-configs/params.cfg",
            "gs://kafka-certs/issues-user.p12",
            "gs://kafka-certs/anomaly-user.p12",
            "gs://kafka-certs/appstat-anomaly-user.p12",
            "gs://kafka-certs/appstat-agg-user.p12",
            "gs://kafka-certs/alarmblock-user.p12",
        ],
    },
    "python_file_uris": ['gs://dagger-mongo/move2mongo_api.zip']
}
path = "gs://dataproc-spark-configs/pip_install.sh"
CLUSTER_GENERATOR_CONFIG = ClusterGenerator(
    project_id=PROJECT_ID,
    zone="us-east1-b",
    master_machine_type="n1-highmem-8",
    worker_machine_type="n1-highmem-8",
    num_workers=2,
    storage_bucket="dataproc-spark-logs",
    init_actions_uris=[path],
    metadata={'PIP_PACKAGES': 'pyyaml requests pandas openpyxl kafka-python google-cloud-storage pyspark'},
).make()
with models.DAG(
    'Versa-kafka2mongo_api',
    # Continue to run DAG twice per day
    default_args=default_dag_args,
    #schedule_interval='*/10 * * * *',
    schedule_interval=None,
    catchup=False,
) as dag:
    # create_dataproc_cluster
    create_dataproc_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        cluster_name=CLUSTER_NAME,
        region=REGION,
        cluster_config=CLUSTER_GENERATOR_CONFIG
    )
    run_dataproc_spark = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark",
        job=PYSPARK_JOB,
        location=REGION,
        project_id=PROJECT_ID,
    )
    delete_dataproc_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        trigger_rule=trigger_rule.TriggerRule.ALL_SUCCESS
    )
    # Define DAG dependencies.
    create_dataproc_cluster >> run_dataproc_spark >> delete_dataproc_cluster
Here is the Python file (call_kafka2mongo_api.py) code:
from move2mongo_api import alarmBlock_api
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('kafka2mongo_api').getOrCreate()
print(f" spark : {spark}")
spark.sparkContext.addPyFile("move2mongo_api.zip")

cust = ['versa']
for c in cust:
    ap = alarmBlock_api(spark, c)
    ap.readFromKafka()
Please note: I upload move2mongo_api.zip to the storage bucket (gs://dagger-mongo/move2mongo_api.zip) before running the Airflow job.
move2mongo_api.zip contains the Python file alarmBlock_api.py, which is referenced in the job file 'call_kafka2mongo.py'.
When I run this workflow, the error I get is shown below:
Traceback (most recent call last):
File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/operators/dataproc.py", line 1849, in execute
job_object = self.hook.submit_job(
File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/common/hooks/base_google.py", line 439, in inner_wrapper
return func(self, *args, **kwargs)
File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/hooks/dataproc.py", line 870, in submit_job
return client.submit_job(
File "/opt/python3.8/lib/python3.8/site-packages/google/cloud/dataproc_v1/services/job_controller/client.py", line 493, in submit_job
request = jobs.SubmitJobRequest(request)
File "/opt/python3.8/lib/python3.8/site-packages/proto/message.py", line 516, in __init__
pb_value = marshal.to_proto(pb_type, value)
File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/marshal.py", line 211, in to_proto
pb_value = rule.to_proto(value)
File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/rules/message.py", line 36, in to_proto
return self._descriptor(**value)
ValueError: Protocol message Job has no "python_file_uris" field.
[2022-07-17, 18:07:00 UTC] {taskinstance.py:1279} INFO - Marking task as UP_FOR_RETRY. dag_id=Versa-kafka2mongo_api, task_id=run_dataproc_spark, execution_date=20220717T180647, start_date=20220717T180659, end_date=20220717T180700
What am I doing wrong here?
Any ideas on how to debug/fix this?
Thanks in advance!
You appear to have a layout issue in PYSPARK_JOB.
You have:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": []
    },
    "python_file_uris": []
}
You want:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [],
        "python_file_uris": []
    },
}
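Applied to the job from the question, the corrected layout would look roughly like this (a sketch; the jar and cert/config lists are elided, and only the zip path is taken from the question):

PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [],   # keep your existing jar list here
        "file_uris": [],       # keep your existing cert/config list here
        # python_file_uris must be nested inside pyspark_job:
        "python_file_uris": ["gs://dagger-mongo/move2mongo_api.zip"],
    },
}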

Empty lines in multiline pattern (python error traceback) in filebeat input are not getting parsed correctly?

The log line which should be harvested and published to logstash as a single line:
[pid: 17318|app: 0|req: 1/2] 10.14.206.28 (jaavedkhan) {60 vars in 1296 bytes} [Mon Dec 30 15:51:38 2019] GET /en/ => generated 27 bytes in 711 msecs (HTTP/1.1 500) 6 headers in 316 bytes (1 switches on core 0)
Mon Dec 30 15:51:39 2019 - announcing my loyalty to the Emperor...
Internal Server Error: /en/
Traceback (most recent call last):
File "/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/base.py", line 126, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/base.py", line 124, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/views/generic/base.py", line 88, in dispatch
return handler(request, *args, **kwargs)
File "./core/views.py", line 31, in get
1/0
ZeroDivisionError: division by zero
My filebeat configuration:
filebeat:
  inputs:
    - type: log
      paths:
        - "/var/log/uwsgi/vassals/dsr-incentives.log"
      fields_under_root: true
      multiline:
        pattern: '\[pid:\s*\d*\|app:'
        negate: true
        match: after
      fields:
        log_type: app-access
        appserver: uwsgi
        app: dsr-incentives
        server_name: server-name.domain.com
I checked the multiline pattern at https://play.golang.org with the log line.
The result is as expected, but the harvester is splitting the log line at "Internal Server Error":
Publish event: {
  "@timestamp": "2019-12-30T13:02:56.564Z",
  "@metadata": {
    "beat": "filebeat",
    "type": "_doc",
    "version": "7.5.1"
  },
  "log": {
    "offset": 128736,
    "file": {
      "path": "/var/log/uwsgi/vassals/dsr-incentives.log"
    },
    "flags": [
      "multiline"
    ]
  },
  "appserver": "uwsgi",
  "server_name": "xyz",
  "log_type": "app-access",
  "host": {
    "name": "xyz"
  },
  "agent": {
    "hostname": "apps-1",
    "id": "d3417bc3-213c-4d5e-a9b5-2273178262d0",
    "version": "7.5.1",
    "name": "xyz",
    "type": "filebeat",
    "ephemeral_id": "125578d6-44d1-4103-94bc-a1d062091487"
  },
"message": "Internal Server Error: /en/\nTraceback (most recent call last):\n File \"/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/exception.py\", line 34, in inner\n response = get_response(request)\n File \"/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/base.py\", line 126, in _get_response\n response = self.process_exception_by_middleware(e, request)\n File \"/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/core/handlers/base.py\", line 124, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/views/generic/base.py\", line 68, in view\n return self.dispatch(request, *args, **kwargs)\n File \"/opt/dsr-incentives/venv/lib/python3.5/site-packages/django/views/generic/base.py\", line 88, in dispatch\n return handler(request, *args, **kwargs)\n File \"./core/views.py\", line 31, in get\n 1/0\nZeroDivisionError: division by zero",
"tags": [
"filebeat"
],
"input": {
"type": "log"
},
"app": "dsr-incentives",
"ecs": {
"version": "1.1.0"
}
}
I think the problem is that the multiline event is getting split at the empty lines that appear in the logs before "Internal Server Error".
Update
The log message is parsed correctly when there are no empty lines above "Internal Server Error".
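A quick way to sanity-check the pattern locally is to replay the log lines against it in Python: with negate: true and match: after, every line that does not match the pattern (including an empty line) should be appended to the previous event. A minimal sketch (the sample lines are abbreviated from the log above):

import re

# The multiline pattern from the filebeat config above.
pattern = re.compile(r'\[pid:\s*\d*\|app:')

sample_lines = [
    '[pid: 17318|app: 0|req: 1/2] 10.14.206.28 (jaavedkhan) ...',  # should start a new event
    '',                                                            # empty line from the log
    'Internal Server Error: /en/',
    'Traceback (most recent call last):',
]

for line in sample_lines:
    status = 'starts new event' if pattern.search(line) else 'appended to previous event'
    print(repr(line), '->', status)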

TatSu: yaml.representer.RepresenterError when dumping to YAML

I have an object model generated by TatSu after a successful parse. The model dumps to stdout in JSON format fine, but when I try to dump it to YAML I get a RepresenterError exception. I am not sure how to solve this; the object model is generated internally by TatSu. Can anyone shed any light on how to resolve this error?
I am using Python 3.7.0 with TatSu v4.4.0 and pyyaml 5.1.2.
My code:
import sys
import json
import datetime

import tatsu
from tatsu.ast import asjson
from tatsu.objectmodel import Node
from tatsu.semantics import ModelBuilderSemantics
from tatsu.exceptions import FailedParse


class ModelBase(Node):
    pass


class MyModelBuilderSemantics(ModelBuilderSemantics):
    def __init__(self, context=None, types=None):
        types = [
            t for t in globals().values()
            if type(t) is type and issubclass(t, ModelBase)
        ] + (types or [])
        super(MyModelBuilderSemantics, self).__init__(context=context, types=types)


def main():
    sys.setrecursionlimit(10000)
    grammar = open('STIL1999.ebnf.working').read()
    parser = tatsu.compile(grammar, semantics=MyModelBuilderSemantics(), asmodel=True)
    assert (parser is not None)
    try:
        start = datetime.datetime.now()
        ast = parser.parse(open(sys.argv[1]).read(), filename=sys.argv[1])
        finish = datetime.datetime.now()
        print('Total = %s' % (finish - start).total_seconds())
        print(json.dumps(asjson(ast), indent=2))
    except FailedParse as e:
        print('Parse error : %s' % e.message)
        print(e.buf.line_info(e.pos))
        return 1

    from tatsu.yaml import ast_dump
    ast_dump(ast, stream=open('foo.yaml', 'w'))
    return 0


if __name__ == '__main__':
    sys.exit(main())
The output:
Total = 0.007043
{
  "__class__": "StilSession",
  "version": {
    "ver": 1.0
  },
  "header": {
    "__class__": "Header",
    "objs": [
      {
        "k": "Title",
        "v": "foo.gz"
      },
      {
        "k": "Date",
        "v": "Mon Nov 4 02:48:48 2019"
      },
      {
        "k": "Source",
        "v": "foo.gz"
      },
      {
        "k": "History",
        "objs": [
          {
            "__class__": "Annotation",
            "ann": " This is a test "
          }
        ]
      }
    ]
  },
  "blocks": []
}
Traceback (most recent call last):
  File "./run.py", line 57, in <module>
    sys.exit(main())
  File "./run.py", line 52, in main
    ast_dump(ast, stream=open('foo.yaml', 'w'))
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/tatsu/yaml.py", line 50, in ast_dump
    return dump(data, object_pairs_hook=AST, **kwargs)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/tatsu/yaml.py", line 33, in dump
    **kwds
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/__init__.py", line 290, in dump
    return dump_all([data], stream, Dumper=Dumper, **kwds)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/__init__.py", line 278, in dump_all
    dumper.represent(data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 27, in represent
    node = self.represent_data(data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 58, in represent_data
    node = self.yaml_representers[None](self, data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 231, in represent_undefined
    raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <tatsu.synth.StilSession object at 0x7ffff68e8f98>)
This issue was resolved by the OP in TatSu pull request #146
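As a possible workaround (a minimal sketch, not the fix from the pull request): since asjson() already reduces the model to JSON-compatible data, as the json.dumps output above shows, a json round-trip yields plain built-in dicts and lists that PyYAML's safe dumper can represent, bypassing the tatsu.synth objects the default representer rejects. dump_model_to_yaml below is a hypothetical helper, not part of TatSu:

import json
import yaml  # PyYAML

from tatsu.ast import asjson


def dump_model_to_yaml(ast, path):
    # The json round-trip strips AST/Node subclasses down to plain
    # dicts, lists and scalars, which yaml.safe_dump handles natively.
    plain = json.loads(json.dumps(asjson(ast)))
    with open(path, 'w') as stream:
        yaml.safe_dump(plain, stream, default_flow_style=False)


# usage in main(), instead of tatsu.yaml.ast_dump:
# dump_model_to_yaml(ast, 'foo.yaml')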

How do I get $near to work in Flask-mongoalchemy

I have been trying to fetch events from my MongoDB database that are close to the current user's location.
I have tried to reformat my model schema to contain [type: "Point"] and even arranged my longitude and latitude into a list.
I also tried adding "2dsphere" indexes to my model using meta, based on what I saw in the mongo-alchemy documentation.
My Model
class Event(db.Document):
    meta = {
        'indexes': [
            ("*location.coordinates", "2dsphere")
        ]
    }
    user_id = db.StringField()
    uuid = db.StringField()
    name = db.StringField()
    address = db.StringField()
    start_time = db.DateTimeField(required=True, default=datetime.datetime.now())
    end_time = db.DateTimeField(required=True, default=datetime.datetime.now())
    location = db.DictField(db.AnythingField())
This is now my main query code
def get(self):
    latitude = float(request.args.get('lat'))
    longitude = float(request.args.get('long'))
    print(longitude)
    print(latitude)
    event = Event.query.filter({
        "location": {
            "$near": {
                "$geometry": {
                    "type": "Point",
                    "coordinates": [longitude, latitude]
                },
                "$maxDistance": 4000
            }
        }
    }).first()
    print(event)
[2019-01-15 23:20:59,797] ERROR in app: Exception on /v1/event [GET]
Traceback (most recent call last):
File "/home/creative_joe/.local/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/creative_joe/.local/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/creative_joe/.local/lib/python3.5/site-packages/flask_restplus/api.py", line 325, in wrapper
resp = resource(*args, **kwargs)
File "/home/creative_joe/.local/lib/python3.5/site-packages/flask/views.py", line 88, in view
return self.dispatch_request(*args, **kwargs)
File "/home/creative_joe/.local/lib/python3.5/site-packages/flask_restplus/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/media/creative_joe/3004586c-9a2d-4cb0-8a5f-d41fe99afc05/home/creative_joe/MonkeyMusic.server/app/views/main.py", line 323, in get
"$maxDistance" : 4000
File "/home/creative_joe/.local/lib/python3.5/site-packages/mongoalchemy/query.py", line 139, in first
for doc in iter(self):
File "/home/creative_joe/.local/lib/python3.5/site-packages/mongoalchemy/query.py", line 412, in next
return self._next_internal()
File "/home/creative_joe/.local/lib/python3.5/site-packages/mongoalchemy/query.py", line 416, in _next_internal
value = next(self.cursor)
File "/home/creative_joe/.local/lib/python3.5/site-packages/mongoalchemy/py3compat.py", line 41, in next
return it.next()
File "/home/creative_joe/.local/lib/python3.5/site-packages/pymongo/cursor.py", line 1189, in next
if len(self.__data) or self._refresh():
File "/home/creative_joe/.local/lib/python3.5/site-packages/pymongo/cursor.py", line 1104, in _refresh
self.__send_message(q)
File "/home/creative_joe/.local/lib/python3.5/site-packages/pymongo/cursor.py", line 982, in __send_message
helpers._check_command_response(first)
File "/home/creative_joe/.local/lib/python3.5/site-packages/pymongo/helpers.py", line 155, in _check_command_response
raise OperationFailure(msg % errmsg, code, response)
pymongo.errors.OperationFailure: error processing query: ns=heroku_c9gg06k0.EventTree: GEONEAR field=location maxdist=4000 isNearSphere=0
Sort: {}
Proj: {}
planner returned error: unable to find index for $geoNear query
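Note: the last line of the error ("unable to find index for $geoNear query") means MongoDB sees no geospatial index on the location field, so the meta declaration above apparently never created one. A minimal sketch of creating the 2dsphere index directly with pymongo (the connection URI is an assumption; the database and collection names are taken from the error's namespace heroku_c9gg06k0.EventTree):

from pymongo import MongoClient, GEOSPHERE

# Hypothetical connection string; substitute your own.
client = MongoClient('mongodb://localhost:27017')
db = client['heroku_c9gg06k0']

# $near / $geoNear require a geospatial index on the queried field.
db['EventTree'].create_index([('location', GEOSPHERE)])

print(db['EventTree'].index_information())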
