TatSu: yaml.representer.RepresenterError when dumping to YAML - python-3.x

I have an object model generated by TatSu after a successful parse. The model dumps to stdout in JSON format just fine, but when I try to dump it to YAML I get a RepresenterError exception. I am not sure how to solve this, since the object model is generated internally by TatSu. Can anyone shed any light on how to resolve this error?
Using Python 3.7.0 with TatSu v4.4.0 and PyYAML 5.1.2.
My code:
import sys
import json
import datetime
import tatsu
from tatsu.ast import asjson
from tatsu.objectmodel import Node
from tatsu.semantics import ModelBuilderSemantics
from tatsu.exceptions import FailedParse

class ModelBase(Node):
    pass

class MyModelBuilderSemantics(ModelBuilderSemantics):
    def __init__(self, context=None, types=None):
        types = [
            t for t in globals().values()
            if type(t) is type and issubclass(t, ModelBase)
        ] + (types or [])
        super(MyModelBuilderSemantics, self).__init__(context=context, types=types)

def main():
    sys.setrecursionlimit(10000)
    grammar = open('STIL1999.ebnf.working').read()
    parser = tatsu.compile(grammar, semantics=MyModelBuilderSemantics(), asmodel=True)
    assert parser is not None
    try:
        start = datetime.datetime.now()
        ast = parser.parse(open(sys.argv[1]).read(), filename=sys.argv[1])
        finish = datetime.datetime.now()
        print('Total = %s' % (finish - start).total_seconds())
        print(json.dumps(asjson(ast), indent=2))
    except FailedParse as e:
        print('Parse error : %s' % e.message)
        print(e.buf.line_info(e.pos))
        return 1
    from tatsu.yaml import ast_dump
    ast_dump(ast, stream=open('foo.yaml', 'w'))
    return 0

if __name__ == '__main__':
    sys.exit(main())
The output:
Total = 0.007043
{
  "__class__": "StilSession",
  "version": {
    "ver": 1.0
  },
  "header": {
    "__class__": "Header",
    "objs": [
      {
        "k": "Title",
        "v": "foo.gz"
      },
      {
        "k": "Date",
        "v": "Mon Nov 4 02:48:48 2019"
      },
      {
        "k": "Source",
        "v": "foo.gz"
      },
      {
        "k": "History",
        "objs": [
          {
            "__class__": "Annotation",
            "ann": " This is a test "
          }
        ]
      }
    ]
  },
  "blocks": []
}
Traceback (most recent call last):
  File "./run.py", line 57, in <module>
    sys.exit(main())
  File "./run.py", line 52, in main
    ast_dump(ast, stream=open('foo.yaml', 'w'))
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/tatsu/yaml.py", line 50, in ast_dump
    return dump(data, object_pairs_hook=AST, **kwargs)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/tatsu/yaml.py", line 33, in dump
    **kwds
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/__init__.py", line 290, in dump
    return dump_all([data], stream, Dumper=Dumper, **kwds)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/__init__.py", line 278, in dump_all
    dumper.represent(data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 27, in represent
    node = self.represent_data(data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 58, in represent_data
    node = self.yaml_representers[None](self, data)
  File "/sw_tools/anaconda3/lib/python3.7/site-packages/yaml/representer.py", line 231, in represent_undefined
    raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <tatsu.synth.StilSession object at 0x7ffff68e8f98>)

This issue was resolved by the OP in TatSu pull request #146
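In the meantime, a possible workaround (my assumption, not part of the linked fix) is to dump the plain JSON-compatible view of the model instead of the synthesized Node objects, since PyYAML's default dumper only knows how to represent built-in types:
import yaml
from tatsu.ast import asjson

# asjson() returns plain dicts/lists/strings (which is why json.dumps works),
# so yaml.safe_dump can represent it without a custom representer for
# tatsu.synth classes.
with open('foo.yaml', 'w') as stream:
    yaml.safe_dump(asjson(ast), stream, default_flow_style=False)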

Related

Airflow DataprocSubmitJobOperator - ValueError: Protocol message Job has no "python_file_uris" field

I'm using the DataprocSubmitJobOperator in Airflow to schedule PySpark jobs, and I'm unable to pass pyfiles to the PySpark job.
Here is the code I'm using:
DAG
# working - passing jars
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": ["gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar",
            'gs://dataproc-spark-jars/bson-4.0.5.jar','gs://dataproc-spark-jars/mongo-spark-connector_2.12-3.0.2.jar','gs://dataproc-spark-jars/mongodb-driver-core-4.0.5.jar',
            'gs://dataproc-spark-jars/mongodb-driver-sync-4.0.5.jar','gs://dataproc-spark-jars/spark-avro_2.12-3.1.2.jar','gs://dataproc-spark-jars/spark-bigquery-with-dependencies_2.12-0.23.2.jar',
            'gs://dataproc-spark-jars/spark-token-provider-kafka-0-10_2.12-3.1.3.jar','gs://dataproc-spark-jars/htrace-core4-4.1.0-incubating.jar','gs://dataproc-spark-jars/hadoop-client-3.3.1.jar','gs://dataproc-spark-jars/spark-sql-kafka-0-10_2.12-3.1.3.jar','gs://dataproc-spark-jars/hadoop-client-runtime-3.3.1.jar','gs://dataproc-spark-jars/hadoop-client-3.3.1.jar','gs://dataproc-spark-jars/kafka-clients-3.2.0.jar','gs://dataproc-spark-jars/commons-pool2-2.11.1.jar'],
        "file_uris": ['gs://kafka-certs/versa-kafka-gke-ca.p12','gs://kafka-certs/syslog-vani.p12',
            'gs://kafka-certs/alarm-compression-user.p12','gs://kafka-certs/appstats-user.p12',
            'gs://kafka-certs/insights-user.p12','gs://kafka-certs/intfutil-user.p12',
            'gs://kafka-certs/reloadpred-chkpoint-user.p12','gs://kafka-certs/reloadpred-user.p12',
            'gs://dataproc-spark-configs/topic-customer-map.cfg','gs://dataproc-spark-configs/params.cfg','gs://kafka-certs/issues-user.p12','gs://kafka-certs/anomaly-user.p12','gs://kafka-certs/appstat-anomaly-user.p12','gs://kafka-certs/appstat-agg-user.p12','gs://kafka-certs/alarmblock-user.p12']
    },
    "python_file_uris": ['gs://dagger-mongo/move2mongo_api.zip']
}
path = "gs://dataproc-spark-configs/pip_install.sh"

CLUSTER_GENERATOR_CONFIG = ClusterGenerator(
    project_id=PROJECT_ID,
    zone="us-east1-b",
    master_machine_type="n1-highmem-8",
    worker_machine_type="n1-highmem-8",
    num_workers=2,
    storage_bucket="dataproc-spark-logs",
    init_actions_uris=[path],
    metadata={'PIP_PACKAGES': 'pyyaml requests pandas openpyxl kafka-python google-cloud-storage pyspark'},
).make()

with models.DAG(
    'Versa-kafka2mongo_api',
    # Continue to run DAG twice per day
    default_args=default_dag_args,
    # schedule_interval='*/10 * * * *',
    schedule_interval=None,
    catchup=False,
) as dag:
    # create_dataproc_cluster
    create_dataproc_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        cluster_name=CLUSTER_NAME,
        region=REGION,
        cluster_config=CLUSTER_GENERATOR_CONFIG
    )
    run_dataproc_spark = DataprocSubmitJobOperator(
        task_id="run_dataproc_spark",
        job=PYSPARK_JOB,
        location=REGION,
        project_id=PROJECT_ID,
    )
    delete_dataproc_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        trigger_rule=trigger_rule.TriggerRule.ALL_SUCCESS
    )
    # Define DAG dependencies.
    create_dataproc_cluster >> run_dataproc_spark >> delete_dataproc_cluster
Here is the Python file (call_kafka2mongo_api.py) code:
from move2mongo_api import alarmBlock_api
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('kafka2mongo_api').getOrCreate()
print(f" spark : {spark}")
spark.sparkContext.addPyFile("move2mongo_api.zip")
cust = ['versa']
for c in cust:
    ap = alarmBlock_api(spark, c)
    ap.readFromKafka()
Please note: I upload move2mongo_api.zip to the storage bucket (gs://dagger-mongo/move2mongo_api.zip) before running the Airflow job.
move2mongo_api.zip contains the Python file alarmBlock_api.py, which is referenced in the job file 'call_kafka2mongo.py'.
When I run this workflow, the error I get is shown below:
Traceback (most recent call last):
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/operators/dataproc.py", line 1849, in execute
    job_object = self.hook.submit_job(
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/common/hooks/base_google.py", line 439, in inner_wrapper
    return func(self, *args, **kwargs)
  File "/opt/python3.8/lib/python3.8/site-packages/airflow/providers/google/cloud/hooks/dataproc.py", line 870, in submit_job
    return client.submit_job(
  File "/opt/python3.8/lib/python3.8/site-packages/google/cloud/dataproc_v1/services/job_controller/client.py", line 493, in submit_job
    request = jobs.SubmitJobRequest(request)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/message.py", line 516, in __init__
    pb_value = marshal.to_proto(pb_type, value)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/marshal.py", line 211, in to_proto
    pb_value = rule.to_proto(value)
  File "/opt/python3.8/lib/python3.8/site-packages/proto/marshal/rules/message.py", line 36, in to_proto
    return self._descriptor(**value)
ValueError: Protocol message Job has no "python_file_uris" field.
[2022-07-17, 18:07:00 UTC] {taskinstance.py:1279} INFO - Marking task as UP_FOR_RETRY. dag_id=Versa-kafka2mongo_api, task_id=run_dataproc_spark, execution_date=20220717T180647, start_date=20220717T180659, end_date=20220717T180700
What am I doing wrong here?
Any ideas on how to debug/fix this?
TIA!
You appear to have a layout issue in PYSPARK_JOB: python_file_uris is a field of the nested PySparkJob message, not of the top-level Job, which is why protobuf rejects it.
You have:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": []
    },
    "python_file_uris": []
}
You want:
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [],
        "python_file_uris": []
    },
}
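For reference, a minimal sketch of the corrected layout applied to the job definition from the question (the long URI lists are elided; only the placement of python_file_uris changes):
PYSPARK_JOB = {
    "reference": {"project_id": PROJECT_ID},
    "placement": {"cluster_name": CLUSTER_NAME},
    "pyspark_job": {
        "main_python_file_uri": PYSPARK_URI,
        "jar_file_uris": [...],   # same jar list as in the question
        "file_uris": [...],       # same certs/config list as in the question
        "python_file_uris": ['gs://dagger-mongo/move2mongo_api.zip'],
    },
}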

pyspark error: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.newAPIHadoopRDD

When I tried to run a search from Spark against Elasticsearch, an error occurred.
The code that I use is the following:
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark import SparkConf
from pyspark.sql import SparkSession

body2 = """{
  "query": {
    "bool": {
      "must": [
        {
          "range": {
            "#timestamp": {
              "lte": "2022-05-05T09:25:15.000-03:00",
              "gte": "2022-05-04T09:25:15.000-03:00"
            }
          }
        },
        {
          "match": {
            "type.keyword": "TABLA1"
          }
        }
      ]
    }
  },
  "size": 10
}"""

sconf = SparkConf().setAppName("Test")
sc = SparkContext.getOrCreate(sconf)

es_read_conf = {
    "es.nodes": "10.48.17.67",
    "es.port": "9200",
    "es.query": body2,
    "es.resource": "Incice1/TABLA1",
    "es.nodes.wan.only": "true",
    "es.net.http.auth.user": "user1",
    "es.net.http.auth.pass": "Askjd56jd8"
}

es_rdd = sc.newAPIHadoopRDD(
    inputFormatClass="org.elasticsearch.hadoop.mr.EsInputFormat",
    keyClass="org.apache.hadoop.io.NullWritable",
    valueClass="org.elasticsearch.hadoop.mr.LinkedMapWritable",
    conf=es_read_conf)
The error that I get when I execute the code in pyspark:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/opt/spark/python/pyspark/context.py", line 859, in newAPIHadoopRDD
    jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
  File "/opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/java_gateway.py", line 1321, in __call__
  File "/opt/spark/python/pyspark/sql/utils.py", line 111, in deco
    return f(*a, **kw)
  File "/opt/spark/python/lib/py4j-0.10.9.3-src.zip/py4j/protocol.py", line 326, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.newAPIHadoopRDD.
: java.lang.ClassNotFoundException: org.elasticsearch.hadoop.mr.LinkedMapWritable
    at java.base/java.net.URLClassLoader.findClass(URLClassLoader.java:476)
    at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:589)
    at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:522)
    at java.base/java.lang.Class.forName0(Native Method)
    at java.base/java.lang.Class.forName(Class.java:398)
    at org.apache.spark.util.Utils$.classForName(Utils.scala:216)
    (...)
The same search works in the Dev Tools console of Elasticsearch, but when I run it from pyspark I get this error.
Thanks for reading and for any response.
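The ClassNotFoundException suggests the elasticsearch-hadoop connector jar is not on Spark's classpath. A minimal sketch of one way to supply it when building the SparkConf (the jar path and version are assumptions; use the elasticsearch-hadoop release that matches your Spark and Elasticsearch versions):
from pyspark import SparkConf, SparkContext

# Hypothetical local path; the elasticsearch-hadoop jar provides
# org.elasticsearch.hadoop.mr.EsInputFormat and LinkedMapWritable,
# the classes the traceback reports as missing.
sconf = (SparkConf()
         .setAppName("Test")
         .set("spark.jars", "/path/to/elasticsearch-hadoop-8.2.0.jar"))
sc = SparkContext.getOrCreate(sconf)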

How can I read the value of headers in the SetTask class after configuring tasks = [SetTask]?

from locust import HttpUser, task, between, SequentialTaskSet, TaskSet, User, events
import json

@events.test_start.add_listener
def on_start(**kwargs):
    print("A test will start...")

@events.test_stop.add_listener
def on_stop(**kwargs):
    print("A test is ending...")

class SetTask(TaskSet):
    @task
    def getLogDetail(self):
        deatil_url = "/auth/online?page=0&size=10&sort=id%2Cdesc"
        with self.client.request(method='GET',
                                 url=deatil_url,
                                 headers=self.headers,
                                 name='get log detail') as response:
            print(response.text)

class FlaskTask(SequentialTaskSet):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def on_start(self):
        res = self.client.request(method='GET', url="/auth/code", name="get code")
        uuid = res.json()['uuid']
        headers = {
            "content-type": "application/json;charset=UTF-8",
        }
        datda_info = {
            "code": "8",
            "password": "B0GdcVWB+XtTwsyBjzoRkn8VnSgtPVKpl8mp7AuQ+BTeU030grUkRwmOHXFCjEhKXB7yezBS7dFEJ63ykR2piQ==",
            "username": "admin",
            "uuid": uuid
        }
        with self.client.request(method='POST', url="/auth/login", headers=headers, catch_response=True, data=json.dumps(datda_info), name="get token") as response:
            self.token = response.json()['token']
            if response.status_code == 200:
                self.token = response.json()['token']
                response.success()
            else:
                response.failure("get token failed")
            self.headers = {
                "Authorization": self.token
            }

    tasks = [SetTask]

    @task
    def getUserDetail(self):
        deatil_url = "/api/dictDetail?dictName=user_status&page=0&size=9999"
        with self.client.request(method='GET',
                                 url=deatil_url,
                                 headers=self.headers,
                                 name='get user detail') as response:
            print(response.text)

def function_task():
    print("This is the function task")

class FlaskUser(HttpUser):
    host = 'http://192.168.31.243:8888'
    wait_time = between(1, 3)
    tasks = [FlaskTask, function_task]
I got this error:
A test will start...
[2022-03-26 00:04:56,146] wangguilin/INFO/locust.runners: Ramping to 1 users at a rate of 1.00 per second
[2022-03-26 00:04:56,147] wangguilin/INFO/locust.runners: All users spawned: {"FlaskUser": 1} (1 total users)
[2022-03-26 00:04:56,148] wangguilin/ERROR/locust.user.task: function_task() takes 0 positional arguments but 1 was given
Traceback (most recent call last):
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 319, in run
    self.execute_next_task()
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 344, in execute_next_task
    self.execute_task(self._task_queue.pop(0))
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 457, in execute_task
    task(self.user)
TypeError: function_task() takes 0 positional arguments but 1 was given
[2022-03-26 00:04:58,154] wangguilin/ERROR/locust.user.task: 'SetTask' object has no attribute 'headers'
Traceback (most recent call last):
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 319, in run
    self.execute_next_task()
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 344, in execute_next_task
    self.execute_task(self._task_queue.pop(0))
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\locust\user\task.py", line 356, in execute_task
    task(self)
  File "C:\Users\Users\PycharmProjects\testlocust\locustflask.py", line 22, in getLogDetail
    headers=self.headers,
AttributeError: 'SetTask' object has no attribute 'headers'
Traceback (most recent call last):
  File "c:\users\Users\pycharmprojects\testlocust\venv\lib\site-packages\gevent_ffi\loop.py", line 270, in python_check_callback
    def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
Question:
I want to know how to read the value of headers in the SetTask class after configuring tasks = [SetTask].
My Locust version is 2.8.3.
So, can parameters be passed in this case?
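One possible approach (my assumption; this question has no accepted answer here) is to store the headers on the user object in on_start and read them through self.user from the nested TaskSet; a nested TaskSet can also reach its parent's attributes via self.parent:
from locust import task, TaskSet, SequentialTaskSet

class FlaskTask(SequentialTaskSet):
    def on_start(self):
        # ... obtain self.token as in the question ...
        self.user.headers = {"Authorization": self.token}  # store on the user object

class SetTask(TaskSet):
    @task
    def getLogDetail(self):
        with self.client.request(method='GET',
                                 url="/auth/online?page=0&size=10&sort=id%2Cdesc",
                                 headers=self.user.headers,  # or: self.parent.headers
                                 name='get log detail') as response:
            print(response.text)
Separately, a plain function in a tasks list is called with the user instance as its argument, so def function_task(user): would avoid the "takes 0 positional arguments but 1 was given" TypeError in the log.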

How to pass multiple parameters with the same name in an aiohttp request?

The API from which I want to receive data takes several values with the same name as a parameter:
https://example.com/method?types=Retail&types=Office&types=Industrial......
I try to do it like this:
params_api_sale = {
    'types': [
        'Retail', 'Office', 'Industrial',
        'Mixed Use', 'Development Site',
        'Land', 'Special Purpose', 'Other'
    ],
    'sortDirection': 'Descending',
    'sortOrder': 'ActivatedOn'
}

async def get_items():
    async with aiohttp.request(
            method='GET', url=vars_.url_api_sale,
            headers=vars_.headers, params=vars_.params_api_sale) as response:
        ...
But this gives me an error:
Traceback (most recent call last):
  File "foo.py", line 85, in <module>
    asyncio.get_event_loop().run_until_complete(main())
  File "C:\Python37\lib\asyncio\base_events.py", line 583, in run_until_complete
    return future.result()
  File "foo.py", line 80, in main
    await asyncio.create_task(get_items())
  File "foo.py", line 44, in get_items
    headers=vars_.headers, params=vars_.params_api_sale) as response:
  File "D:\Dev\venv\lib\site-packages\aiohttp\client.py", line 1051, in __aenter__
    self._resp = await self._coro
  File "D:\Dev\venv\lib\site-packages\aiohttp\client.py", line 473, in _request
    ssl=ssl, proxy_headers=proxy_headers, traces=traces)
  File "D:\Dev\venv\lib\site-packages\aiohttp\client_reqrep.py", line 263, in __init__
    url2 = url.with_query(params)
  File "D:\Dev\venv\lib\site-packages\yarl\__init__.py", line 922, in with_query
    new_query = self._get_str_query(*args, **kwargs)
  File "D:\Dev\venv\lib\site-packages\yarl\__init__.py", line 886, in _get_str_query
    quoter(k) + "=" + quoter(self._query_var(v)) for k, v in query.items()
  File "D:\Dev\venv\lib\site-packages\yarl\__init__.py", line 886, in <genexpr>
    quoter(k) + "=" + quoter(self._query_var(v)) for k, v in query.items()
  File "D:\Dev\venv\lib\site-packages\yarl\__init__.py", line 864, in _query_var
    "of type {}".format(v, type(v))
TypeError: Invalid variable type: value should be str or int, got ['Retail', 'Office', 'Industrial', 'Mixed Use', 'Development Site', 'Land', 'Special Purpose', 'Other'] of type <class 'list'>
I have not found a documented way to do this. All that comes to mind is to generate the URL string with these parameters myself, unless there is a more correct way.
import requests

params = {
    'types': [
        'Retail', 'Office', 'Industrial',
        'Mixed Use', 'Development Site',
        'Land', 'Special Purpose', 'Other'
    ],
    'sortDirection': 'Descending',
    'sortOrder': 'ActivatedOn'
}

r = requests.get("http://www.test.com/", params=params)
print(r.url)
output:
https://www.test.com/?types=Retail&types=Office&types=Industrial&types=Mixed+Use&types=Development+Site&types=Land&types=Special+Purpose&types=Other&sortDirection=Descending&sortOrder=ActivatedOn
For aiohttp you can use a MultiDict, which is also covered in the official documentation:
For sending data with multiple values for the same key MultiDict may be used as well.
As stated in the aiohttp documentation, you can also pass a list of 2-item tuples as params, repeating the same key to achieve your goal:
Also it is possible to pass a list of 2-item tuples as parameters, in that case you can specify multiple values for each key:
params = [
    ("key1", "value1"),
    ("key1", "value2"),
    ("key2", "value3"),
]
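Applied to the parameters from the question, a minimal sketch (the URL here is a placeholder for the original vars_.url_api_sale, and headers are omitted):
import asyncio
import aiohttp

# Same values as params_api_sale, expressed as (key, value) tuples so that
# 'types' repeats in the query string.
params_api_sale = [
    ("types", "Retail"), ("types", "Office"), ("types", "Industrial"),
    ("types", "Mixed Use"), ("types", "Development Site"),
    ("types", "Land"), ("types", "Special Purpose"), ("types", "Other"),
    ("sortDirection", "Descending"),
    ("sortOrder", "ActivatedOn"),
]

async def get_items():
    # url is a placeholder for the real API endpoint
    async with aiohttp.request(method='GET', url="https://example.com/method",
                               params=params_api_sale) as response:
        return await response.json()

asyncio.run(get_items())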

Google Adwords Traffic Estimator Service and Python

I've downloaded a code sample that looks for particular keywords and pulls some metrics. I've noticed a lot of Google AdWords API examples are compliant with Python 3.x, so I'm wondering if there is an issue with that. See below for the code sample:
from googleads import adwords

def main(client):
    # Initialize appropriate service.
    traffic_estimator_service = client.GetService(
        'TrafficEstimatorService', version='v201609')

    # Construct selector object and retrieve traffic estimates.
    keywords = [
        {'text': 'mars cruise', 'matchType': 'BROAD'},
        {'text': 'cheap cruise', 'matchType': 'PHRASE'},
        {'text': 'cruise', 'matchType': 'EXACT'}
    ]
    negative_keywords = [
        {'text': 'moon walk', 'matchType': 'BROAD'}
    ]
    keyword_estimate_requests = []
    for keyword in keywords:
        keyword_estimate_requests.append({
            'keyword': {
                'xsi_type': 'Keyword',
                'matchType': keyword['matchType'],
                'text': keyword['text']
            }
        })
    for keyword in negative_keywords:
        keyword_estimate_requests.append({
            'keyword': {
                'xsi_type': 'Keyword',
                'matchType': keyword['matchType'],
                'text': keyword['text']
            },
            'isNegative': 'true'
        })

    # Create ad group estimate requests.
    adgroup_estimate_requests = [{
        'keywordEstimateRequests': keyword_estimate_requests,
        'maxCpc': {
            'xsi_type': 'Money',
            'microAmount': '1000000'
        }
    }]

    # Create campaign estimate requests.
    campaign_estimate_requests = [{
        'adGroupEstimateRequests': adgroup_estimate_requests,
        'criteria': [
            {
                'xsi_type': 'Location',
                'id': '2840'  # United States.
            },
            {
                'xsi_type': 'Language',
                'id': '1000'  # English.
            }
        ],
    }]

    # Create the selector.
    selector = {
        'campaignEstimateRequests': campaign_estimate_requests,
    }

    # Optional: Request a list of campaign-level estimates segmented by
    # platform.
    selector['platformEstimateRequested'] = True

    # Get traffic estimates.
    estimates = traffic_estimator_service.get(selector)

    campaign_estimate = estimates['campaignEstimates'][0]

    # Display the campaign level estimates segmented by platform.
    if 'platformEstimates' in campaign_estimate:
        platform_template = ('Results for the platform with ID: "%d" and name: '
                             '"%s".')
        for platform_estimate in campaign_estimate['platformEstimates']:
            platform = platform_estimate['platform']
            DisplayEstimate(platform_template % (platform['id'],
                                                 platform['platformName']),
                            platform_estimate['minEstimate'],
                            platform_estimate['maxEstimate'])

    # Display the keyword estimates.
    if 'adGroupEstimates' in campaign_estimate:
        ad_group_estimate = campaign_estimate['adGroupEstimates'][0]
        if 'keywordEstimates' in ad_group_estimate:
            keyword_estimates = ad_group_estimate['keywordEstimates']
            keyword_template = ('Results for the keyword with text "%s" and match '
                                'type "%s":')
            keyword_estimates_and_requests = zip(keyword_estimates,
                                                 keyword_estimate_requests)
            for keyword_tuple in keyword_estimates_and_requests:
                if keyword_tuple[1].get('isNegative', False):
                    continue
                keyword = keyword_tuple[1]['keyword']
                keyword_estimate = keyword_tuple[0]
                DisplayEstimate(keyword_template % (keyword['text'],
                                                    keyword['matchType']),
                                keyword_estimate['min'], keyword_estimate['max'])

def _CalculateMean(min_est, max_est):
    if min_est and max_est:
        return (float(min_est) + float(max_est)) / 2.0
    else:
        return None

def _FormatMean(mean):
    if mean:
        return '%.2f' % mean
    else:
        return 'N/A'

def DisplayEstimate(message, min_estimate, max_estimate):
    """Displays mean average cpc, position, clicks, and total cost for estimate.

    Args:
      message: str message to display for the given estimate.
      min_estimate: sudsobject containing a minimum estimate from the
        TrafficEstimatorService response.
      max_estimate: sudsobject containing a maximum estimate from the
        TrafficEstimatorService response.
    """
    # Find the mean of the min and max values.
    mean_avg_cpc = (_CalculateMean(min_estimate['averageCpc']['microAmount'],
                                   max_estimate['averageCpc']['microAmount'])
                    if 'averageCpc' in min_estimate else None)
    mean_avg_pos = (_CalculateMean(min_estimate['averagePosition'],
                                   max_estimate['averagePosition'])
                    if 'averagePosition' in min_estimate else None)
    mean_clicks = _CalculateMean(min_estimate['clicksPerDay'],
                                 max_estimate['clicksPerDay'])
    mean_total_cost = _CalculateMean(min_estimate['totalCost']['microAmount'],
                                     max_estimate['totalCost']['microAmount'])
    print(message)
    print('Estimated average CPC: %s' % _FormatMean(mean_avg_cpc))
    print('Estimated ad position: %s' % _FormatMean(mean_avg_pos))
    print('Estimated daily clicks: %s' % _FormatMean(mean_clicks))
    print('Estimated daily cost: %s' % _FormatMean(mean_total_cost))

if __name__ == '__main__':
    # Initialize client object.
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client)
Here is the error message:
(Money) not-found
path: "Money", not-found
(Keyword) not-found
path: "Keyword", not-found
(Keyword) not-found
path: "Keyword", not-found
(Keyword) not-found
path: "Keyword", not-found
(Keyword) not-found
path: "Keyword", not-found
(Location) not-found
path: "Location", not-found
(Language) not-found
path: "Language", not-found
<suds.sax.document.Document object at 0x03BF1D10>
Server raised fault in response.
Traceback (most recent call last):
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\transport\http.py", line 82, in send
    fp = self.u2open(u2request)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\transport\http.py", line 132, in u2open
    return url.open(u2request, timeout=tm)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 472, in open
    response = meth(req, response)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 582, in http_response
    'http', request, response, code, msg, hdrs)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 510, in error
    return self._call_chain(*args)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 444, in _call_chain
    result = func(*args)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\urllib\request.py", line 590, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 500: Internal Server Error

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\client.py", line 613, in send
    reply = self.options.transport.send(request)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\transport\http.py", line 94, in send
    raise TransportError(e.msg, e.code, e.fp)
suds.transport.TransportError: Internal Server Error

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\adwords test - Copy (2).py", line 177, in <module>
    main(adwords_client)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\adwords test - Copy (2).py", line 95, in main
    estimates = traffic_estimator_service.get(selector)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\googleads\common.py", line 696, in MakeSoapRequest
    raise e
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\googleads\common.py", line 692, in MakeSoapRequest
    for arg in args])
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\client.py", line 521, in __call__
    return client.invoke(args, kwargs)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\client.py", line 581, in invoke
    result = self.send(soapenv)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\client.py", line 619, in send
    description=tostr(e), original_soapenv=original_soapenv)
  File "C:\Users\sfroese\AppData\Local\Programs\Python\Python35-32\lib\site-packages\suds\client.py", line 670, in process_reply
    raise WebFault(fault, replyroot)
suds.WebFault: Server raised fault: '[AuthenticationError.CLIENT_CUSTOMER_ID_IS_REQUIRED # ; trigger:'<null>']'
You should set client_customer_id in your googleads.yaml file. You can get the client customer ID from your manager account: go to your manager account, add the client, then copy the ID from the top-right corner of the screen and paste it into client_customer_id in googleads.yaml.
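For reference, a sketch of the relevant section of googleads.yaml (field names follow the standard template that ships with the googleads library; the ID shown is a placeholder):
adwords:
  developer_token: INSERT_DEVELOPER_TOKEN_HERE
  client_customer_id: 123-456-7890  # the client ID copied from the manager account UI
  client_id: INSERT_OAUTH2_CLIENT_ID_HERE
  client_secret: INSERT_OAUTH2_CLIENT_SECRET_HERE
  refresh_token: INSERT_REFRESH_TOKEN_HERE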
