requests.post returns only the top 50 records even after setting offset and limit - python-3.x

I am running a query against CI_INFOOBJECTS to fetch all the Webi documents present in the root folder and its subfolders.
This query returns 70 records in Query Builder, but when I run it using requests.post it gives me only the top 50 records. I tried changing offset and limit, but it still returns the same 50 records.
Can anyone help me resolve this? This is the best approach I have found so far to get all the reports from folders and subfolders in order to update the source universe.
import requests
from lxml import etree

# Fetch the query template, insert the CMS query text, then post it back
folder_get = requests.get(bip_url + '/v1/cmsquery', headers=headers)
folder_root = etree.fromstring(folder_get.text)
Query_var = 'SELECT SI_ID,SI_NAME FROM CI_INFOOBJECTS WHERE SI_KIND = \'WEBI\' AND SI_ANCESTOR = 6526 ORDER BY SI_ID'
folder_root[0].text = Query_var
data1 = etree.tostring(folder_root)
folder_post = requests.post(bip_url + '/v1/cmsquery?offset=51&limit=100', headers=headers, data=data1)
print(folder_post.content)

Try using page and pagesize instead of offset and limit.
folder_post = requests.post(bip_url + '/v1/cmsquery?page=1&pagesize=100', headers = headers, data = data1)
This should give you the 70 records that you expect.
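If you want to collect every row in code rather than hand-tune the page number, a minimal paging sketch could look like the following. It reuses the bip_url, headers and data1 from the question and assumes the response is an Atom-style feed whose rows are <entry> elements; adjust the element name if your response differs.
import requests
from lxml import etree

# Assumption: the CMS query response is an Atom feed and each row is an <entry>.
ATOM_NS = '{http://www.w3.org/2005/Atom}'

all_entries = []
page = 1
while True:
    # Request one page at a time; pagesize caps the number of rows per page.
    resp = requests.post(bip_url + '/v1/cmsquery?page={}&pagesize=50'.format(page),
                         headers=headers, data=data1)
    root = etree.fromstring(resp.content)
    entries = root.findall('.//' + ATOM_NS + 'entry')
    if not entries:      # no rows on this page, so we are past the last page
        break
    all_entries.extend(entries)
    page += 1

print(len(all_entries))  # should report all 70 documents for this folder tree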

Related

Trying to store the value in a variable before displaying it, but print(record['recommendation']) errors when it becomes temp=record['recommendation']

from neo4j import GraphDatabase, basic_auth

driver = GraphDatabase.driver(
    "neo4j://34.201.9.108:7687",
    auth=basic_auth("neo4j", "chart-certifications-bottom"))
cypher_query = '''
MATCH (m:Movie {title:$movie})<-[:RATED]-(u:User)-[:RATED]->(rec:Movie)
RETURN distinct rec.title AS recommendation LIMIT 20
'''
with driver.session(database="neo4j") as session:
    results = session.read_transaction(
        lambda tx: tx.run(cypher_query,
                          movie="Crimson Tide").data())
    for record in results:
        print(record['recommendation'])  # <----------------------- OK
driver.close()
When I try to store the value in a variable before displaying it, i.e. when print(record['recommendation']) becomes temp=record['recommendation'], I get an error:
#app.get("/neo4j")
def graph_db():
driver = GraphDatabase.driver(
"neo4j://34.201.9.108:7687",
auth=basic_auth("neo4j", "chart-certifications-bottom"))
cypher_query = '''
MATCH (n:Person) RETURN n LIMIT 25
'''
with driver.session(database="neo4j") as session:
results = session.read_transaction(
lambda tx: tx.run(cypher_query,
movie="Crimson Tide").data())
data=[]
for record in results:
temp=record['recommendation'] #<-----------------------error
data.append(temp)
result=data
driver.close()
return{"Result ":result}
When you replaced your query with
MATCH (n:Person) RETURN n LIMIT 25
you are returning 25 Person nodes, and a column named recommendation does not exist in that result, so
temp=record['recommendation']
fails. Make sure that the column name you access matches what the result set of your query actually returns.
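As a minimal sketch of the fix (reusing the driver and session setup from the question), either keep the recommendation query and its alias, or keep the Person query and read the key it actually returns:
# Option 1: keep reading 'recommendation' by keeping the query that aliases it
cypher_query = '''
MATCH (m:Movie {title:$movie})<-[:RATED]-(u:User)-[:RATED]->(rec:Movie)
RETURN distinct rec.title AS recommendation LIMIT 20
'''
results = session.read_transaction(
    lambda tx: tx.run(cypher_query, movie="Crimson Tide").data())
data = [record['recommendation'] for record in results]

# Option 2: keep the Person query, but read the column it returns, which is 'n'
cypher_query = '''
MATCH (n:Person) RETURN n LIMIT 25
'''
results = session.read_transaction(lambda tx: tx.run(cypher_query).data())
data = [record['n'] for record in results]  # each record exposes key 'n', not 'recommendation'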

Twitter API: How to make query keep running?

Novice programmer here seeking help.
I have already set up my code to use Twitter's premium API.
SEARCH_TERM = '#AAPL OR #FB OR #KO OR #ABT OR #PEPCO'
PRODUCT = 'fullarchive'
LABEL = 'my_label'
r = api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
                {'query':SEARCH_TERM, 'fromDate':201501010000, 'toDate':201812310000})
However, when I run it I only obtain the maximum number of tweets per search, which is 500.
My question is: should I add maxResults = 500 to the query? And how do I use the next parameter to keep the code running until all the tweets that match my query have been obtained?
To up the results from the default of 100 to 500, yes, add maxResults to the query like this:
r = api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
                {
                    'query':SEARCH_TERM,
                    'fromDate':201501010000, 'toDate':201812310000,
                    'maxResults':500
                })
You can make successive queries to get more results by using the next parameter. But, even easier, you can let TwitterAPI do this for you by using the TwitterPager class. Here is an example:
from TwitterAPI import TwitterAPI, TwitterPager

SEARCH_TERM = '#AAPL OR #FB OR #KO OR #ABT OR #PEPCO'
PRODUCT = 'fullarchive'
LABEL = 'my_label'

api = TwitterAPI(<consumer key>,
                 <consumer secret>,
                 <access token key>,
                 <access token secret>)

pager = TwitterPager(api, 'tweets/search/%s/:%s' % (PRODUCT, LABEL),
                     {
                         'query':SEARCH_TERM,
                         'fromDate':201501010000, 'toDate':201812310000
                     })

for item in pager.get_iterator():
    print(item['text'] if 'text' in item else item)
This example will keep making successive requests with the next parameter until no tweets can be downloaded.
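If you would rather drive the paging yourself instead of using TwitterPager, a rough sketch follows. It assumes the premium search response body carries a top-level next token while more pages remain, and that the TwitterAPI response object exposes the parsed JSON via r.json() (json.loads(r.text) would work as well):
params = {'query': SEARCH_TERM,
          'fromDate': 201501010000, 'toDate': 201812310000,
          'maxResults': 500}

tweets = []
while True:
    r = api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL), params)
    body = r.json()                      # parsed response body (assumed helper)
    tweets.extend(body.get('results', []))
    next_token = body.get('next')        # absent once the last page has been returned
    if not next_token:
        break
    params['next'] = next_token          # request the following page

print('downloaded %d tweets' % len(tweets))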
Use the count parameter in a raw_query, for example:
results = api.GetSearch(
    raw_query="q=twitter%20&result_type=recent&since=2014-07-19&count=100")

How to avoid the header while exporting a BigQuery table into Google Storage

I have developed the code below, which exports a BigQuery table into a Google Storage bucket. I want to merge the files into a single file without a header, so that downstream processes can use the file without any issues.
def export_bq_table_to_gcs(self, table_name):
    client = bigquery.Client(project=project_name)
    print("Exporting table {}".format(table_name))
    dataset_ref = client.dataset(dataset_name, project=project_name)
    dataset = bigquery.Dataset(dataset_ref)
    table_ref = dataset.table(table_name)
    size_bytes = client.get_table(table_ref).num_bytes
    # For tables bigger than 1GB use Google's auto split; otherwise the export is forced into a single file.
    if size_bytes > 10 ** 9:
        destination_uris = [
            'gs://{}/{}{}*.csv'.format(bucket_name, f'{table_name}_temp', uid)]
    else:
        destination_uris = [
            'gs://{}/{}{}.csv'.format(bucket_name, f'{table_name}_temp', uid)]
    extract_job = client.extract_table(table_ref, destination_uris)  # API request
    result = extract_job.result()  # Waits for job to complete.
    if result.state != 'DONE' or result.errors:
        raise Exception('Failed extract job {} for table {}'.format(result.job_id, table_name))
    else:
        print('BQ table(s) export completed successfully')
        storage_client = storage.Client(project=gs_project_name)
        bucket = storage_client.get_bucket(gs_bucket_name)
        blob_list = bucket.list_blobs(prefix=f'{table_name}_temp')
        print('Merging shard files into single file')
        bucket.blob(f'{table_name}.csv').compose(blob_list)
Can you please help me find a way to skip the header?
Thanks,
Raghunath.
We can avoid the header by using an ExtractJobConfig to set the print_header parameter to False. Sample code:
job_config = bigquery.job.ExtractJobConfig(print_header=False)
extract_job = client.extract_table(table_ref, destination_uris,
                                   job_config=job_config)
Thanks
You can use skipLeadingRows (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.googleSheetsOptions.skipLeadingRows)

Node-RED + DB2 - msg : string[18] "No response object"

So, I'm a beginner in Node-RED and need to build a simple API with DB2 queries through flows. I'm using node-red-contrib-db2 to accomplish that. The thing is, I managed to get the results as payloads into the debug node, whether triggered by a timestamp or an HTTP request. However, I can't get these results into the HTTP response and can't find the reason. Is it a problem with the db2 plugin or just me?
Exported nodes below:
[{"id":"96197abb.fd4098","type":"http in","z":"b4aa8db5.217028","name":"","url":"/wastes","method":"get","upload":false,"swaggerDoc":"","x":150,"y":140,"wires":[["9affb306.caf7e"]]},{"id":"bda39d37.edb418","type":"http response","z":"b4aa8db5.217028","name":"","statusCode":"200","headers":{},"x":940,"y":100,"wires":[]},{"id":"41708443.e4670c","type":"inject","z":"b4aa8db5.217028","name":"","topic":"","payload":"","payloadType":"date","repeat":"","crontab":"","once":false,"onceDelay":0.1,"x":220,"y":40,"wires":[["22a6e217.ead65e"]]},{"id":"9d1e6783.eb246","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"IOCDATA","x":560,"y":40,"wires":[["80e51c1b.23b378"],[]]},{"id":"80e51c1b.23b378","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","x":730,"y":40,"wires":[]},{"id":"22a6e217.ead65e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.database = \"iocdata\";\nmsg.payload = \"select * from viseu.waste_view\";\nreturn msg;","outputs":1,"noerr":0,"x":390,"y":40,"wires":[["9d1e6783.eb246"]]},{"id":"4a6bd014.f39868","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"IOCDATA","x":500,"y":140,"wires":[["bda39d37.edb418","74e28d3e.039be4"],[]]},{"id":"9affb306.caf7e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.database = \"iocdata\";\nmsg.payload = \"select * from viseu.waste_view where id = 1\";\nreturn msg;","outputs":1,"noerr":0,"x":330,"y":140,"wires":[["4a6bd014.f39868"]]},{"id":"74e28d3e.039be4","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","x":950,"y":180,"wires":[]},{"id":"3a218407.1cca74","type":"IbmDBdatabase","z":"","host":"10.102.0.62","port":"50002","db":"iocdata"}]
This is an issue with the ibmdb node you are using - it is not reusing the received message when it sends its results. That means the msg.req and msg.res properties provided by the HTTP In node are not set on the message by the time it reaches the HTTP Response node, so the response node doesn't know which request to respond to.
To work around the issue, one approach is to store msg.req and msg.res in flow context using a Change node before the ibmdb node, and then copy them back onto the msg after the ibmdb node. This isn't ideal because it can only handle one request at a time.
It would be best to raise an issue against the ibmdb node.
I managed to get my flow working, at the cost of many workarounds and variable juggling. But it IS working now: select count + select rows + join rows, where msg.complete is set once the count value is reached. Here is the code:
[{"id":"96197abb.fd4098","type":"http in","z":"b4aa8db5.217028","name":"","url":"/wastes","method":"get","upload":false,"swaggerDoc":"","x":90,"y":140,"wires":[["d5f42a96.83f688"]]},{"id":"bda39d37.edb418","type":"http response","z":"b4aa8db5.217028","name":"","statusCode":"200","headers":{},"x":980,"y":260,"wires":[]},{"id":"4a6bd014.f39868","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"SELECT waste_view","x":360,"y":200,"wires":[["35f99a5a.c7f87e"],[]]},{"id":"9affb306.caf7e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.database = \"iocdata\";\nmsg.payload = \"select count(*) from viseu.waste_view\";\n\nreturn msg;","outputs":1,"noerr":0,"x":170,"y":200,"wires":[["4a6bd014.f39868"]]},{"id":"74e28d3e.039be4","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","x":890,"y":380,"wires":[]},{"id":"d5f42a96.83f688","type":"change","z":"b4aa8db5.217028","name":"","rules":[{"t":"set","p":"req","pt":"flow","to":"req","tot":"msg"},{"t":"set","p":"res","pt":"flow","to":"res","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":260,"y":140,"wires":[["9affb306.caf7e"]]},{"id":"c3ebb136.aa8988","type":"change","z":"b4aa8db5.217028","name":"","rules":[{"t":"set","p":"req","pt":"msg","to":"req","tot":"flow"},{"t":"set","p":"res","pt":"msg","to":"res","tot":"flow"}],"action":"","property":"","from":"","to":"","reg":false,"x":800,"y":260,"wires":[["bda39d37.edb418"]]},{"id":"ca59ece2.844b3","type":"join","z":"b4aa8db5.217028","name":"","mode":"custom","build":"array","property":"payload","propertyType":"msg","key":"topic","joiner":"\\n","joinerType":"str","accumulate":false,"timeout":"","count":"","reduceRight":false,"reduceExp":"","reduceInit":"","reduceInitType":"","reduceFixup":"","x":630,"y":260,"wires":[["c3ebb136.aa8988","74e28d3e.039be4"]]},{"id":"35f99a5a.c7f87e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.rowcount = msg.payload[1];\nmsg.database = \"iocdata\";\nmsg.payload = \"select * from viseu.waste_view\";// fetch first \" + msg.count[1] + \" rows only\";\n\nreturn msg;","outputs":1,"noerr":0,"x":550,"y":200,"wires":[["327a8ae.a8ce2f6"]]},{"id":"2666e2ba.41dc8e","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"SELECT waste_view","x":800,"y":200,"wires":[["9008e06f.bf6d7"],[]]},{"id":"ec61a7f3.68cf8","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"count","x":650,"y":320,"wires":[]},{"id":"327a8ae.a8ce2f6","type":"change","z":"b4aa8db5.217028","name":"","rules":[{"t":"set","p":"rowcount","pt":"flow","to":"rowcount","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":670,"y":140,"wires":[["2666e2ba.41dc8e"]]},{"id":"90204f2d.8bafe8","type":"change","z":"b4aa8db5.217028","name":"","rules":[{"t":"set","p":"rowcount","pt":"msg","to":"rowcount","tot":"flow"}],"action":"","property":"","from":"","to":"","reg":false,"x":310,"y":320,"wires":[["6888cd0d.d00064"]]},{"id":"9008e06f.bf6d7","type":"counter","z":"b4aa8db5.217028","name":"","init":"0","step":"1","lower":"","upper":"","mode":"increment","outputs":"1","x":220,"y":260,"wires":[["90204f2d.8bafe8"]]},{"id":"6888cd0d.d00064","type":"function","z":"b4aa8db5.217028","name":"if rowcount === count","func":"if (msg.count === msg.rowcount) {\n msg.complete = true;\n}\n\nreturn 
msg;","outputs":1,"noerr":0,"x":440,"y":260,"wires":[["ca59ece2.844b3","ec61a7f3.68cf8","a63f6ad6.26f08"]]},{"id":"a63f6ad6.26f08","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"rowcount","x":660,"y":380,"wires":[]},{"id":"3a218407.1cca74","type":"IbmDBdatabase","z":"","host":"10.102.0.62","port":"50002","db":"iocdata"}]
EDIT 21/02/2018: the previous solution is not very good, because the counter node keeps its value between runs and I couldn't reset it the way I wanted, so the counter ends up exceeding the desired rowcount value. I had to implement my own counter in a function node instead. New code below:
[{"id":"96197abb.fd4098","type":"http in","z":"b4aa8db5.217028","name":"","url":"/wastes","method":"get","upload":false,"swaggerDoc":"","x":90,"y":60,"wires":[["d5f42a96.83f688"]]},{"id":"bda39d37.edb418","type":"http response","z":"b4aa8db5.217028","name":"","statusCode":"200","headers":{},"x":720,"y":220,"wires":[]},{"id":"4a6bd014.f39868","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"SELECT waste_view","x":740,"y":60,"wires":[["35f99a5a.c7f87e"],[]]},{"id":"9affb306.caf7e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.database = \"iocdata\";\nmsg.payload = \"select count(*) from viseu.waste_view\";\n\nreturn msg;","outputs":1,"noerr":0,"x":530,"y":60,"wires":[["4a6bd014.f39868"]]},{"id":"74e28d3e.039be4","type":"debug","z":"b4aa8db5.217028","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","x":550,"y":280,"wires":[]},{"id":"d5f42a96.83f688","type":"change","z":"b4aa8db5.217028","name":"save req and res","rules":[{"t":"set","p":"req","pt":"flow","to":"req","tot":"msg"},{"t":"set","p":"res","pt":"flow","to":"res","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":290,"y":60,"wires":[["9affb306.caf7e"]]},{"id":"ca59ece2.844b3","type":"join","z":"b4aa8db5.217028","name":"","mode":"custom","build":"array","property":"payload","propertyType":"msg","key":"topic","joiner":"\\n","joinerType":"str","accumulate":false,"timeout":"","count":"msg.count","reduceRight":false,"reduceExp":"","reduceInit":"","reduceInitType":"","reduceFixup":"","x":390,"y":220,"wires":[["74e28d3e.039be4","c3ebb136.aa8988"]]},{"id":"35f99a5a.c7f87e","type":"function","z":"b4aa8db5.217028","name":"SQL Query","func":"msg.rowcount = msg.payload[1];\nmsg.database = \"iocdata\";\nmsg.payload = \"select * from viseu.waste_view\";\n\nreturn msg;","outputs":1,"noerr":0,"x":950,"y":60,"wires":[["327a8ae.a8ce2f6"]]},{"id":"2666e2ba.41dc8e","type":"ibmdb","z":"b4aa8db5.217028","mydb":"3a218407.1cca74","name":"SELECT waste_view","x":380,"y":140,"wires":[["90204f2d.8bafe8"],[]]},{"id":"327a8ae.a8ce2f6","type":"change","z":"b4aa8db5.217028","name":"save rowcount","rules":[{"t":"set","p":"rowcount","pt":"flow","to":"rowcount","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":160,"y":140,"wires":[["2666e2ba.41dc8e"]]},{"id":"90204f2d.8bafe8","type":"change","z":"b4aa8db5.217028","name":"get rowcount and count","rules":[{"t":"set","p":"rowcount","pt":"msg","to":"rowcount","tot":"flow"},{"t":"set","p":"count","pt":"msg","to":"count","tot":"flow"}],"action":"","property":"","from":"","to":"","reg":false,"x":630,"y":140,"wires":[["6888cd0d.d00064"]]},{"id":"6888cd0d.d00064","type":"function","z":"b4aa8db5.217028","name":"if count === rowcount","func":"//fix: msg.count ultrapassa msg.rowcount\nmsg.count = msg.count+1 || 1;\n\nif (msg.count === msg.rowcount) {\n msg.complete = true;\n msg.count = 0;\n}\n\nreturn msg;","outputs":1,"noerr":0,"x":880,"y":140,"wires":[["82ecfa98.9473d8"]]},{"id":"c3ebb136.aa8988","type":"change","z":"b4aa8db5.217028","name":"get req, res","rules":[{"t":"set","p":"req","pt":"msg","to":"req","tot":"flow"},{"t":"set","p":"res","pt":"msg","to":"res","tot":"flow"}],"action":"","property":"","from":"","to":"","reg":false,"x":550,"y":220,"wires":[["bda39d37.edb418"]]},{"id":"82ecfa98.9473d8","type":"change","z":"b4aa8db5.217028","name":"save 
count","rules":[{"t":"set","p":"count","pt":"flow","to":"count","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":210,"y":220,"wires":[["ca59ece2.844b3"]]},{"id":"3a218407.1cca74","type":"IbmDBdatabase","z":"","host":"10.102.0.69","port":"50002","db":"iocdata"}]

Rhythmbox plugin: get a limited number of songs

Get all songs:
for row in self.shell.props.library_source.props.base_query_model:
    print(row[0].get_string(RB.RhythmDBPropType.TITLE))
I need to get only 10 songs (for example).
First try:
self.shell.props.library_source.props.base_query_model.set_property("limit-value", GLib.Variant("n", 10))
for row in self.shell.props.library_source.props.base_query_model:
    print(row[0].get_string(RB.RhythmDBPropType.TITLE))
Result:
Warning: g_object_set_property: construct property "limit-value" for object 'RhythmDBQueryModel' can't be set after construction
self.shell.props.library_source.props.base_query_model.set_property("limit-value", GLib.Variant("n", 10))
Second try: since I do not know how to set the limit value, I tried a full query filtered by GENRE instead.
db = self.shell.props.db
query_model = RB.RhythmDBQueryModel.new_empty(db)
query = GLib.PtrArray()
db.query_append_params(query, RB.RhythmDBQueryType.EQUALS, RB.RhythmDBPropType.GENRE, "Salsa")
db.do_full_query_parsed(query_model, query)
for row in query_model:
    print(row[0].get_string(RB.RhythmDBPropType.ARTIST))
Result:
Rhythmbox crashed. The error is detailed in: How do I query for data in Rhythmbox
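Since limit-value is a construct-only property, a simple workaround (a sketch, not from the thread above) is to stop iterating the existing model after the first 10 rows instead of limiting the query itself:
# Take only the first 10 rows of the base query model.
model = self.shell.props.library_source.props.base_query_model
for i, row in enumerate(model):
    if i >= 10:
        break
    print(row[0].get_string(RB.RhythmDBPropType.TITLE))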
