Get all songs:
# Iterate every row of Rhythmbox's library query model and print each
# entry's title (row[0] is the RhythmDBEntry).
for row in self.shell.props.library_source.props.base_query_model:
    print(row[0].get_string(RB.RhythmDBPropType.TITLE))
I need to get only 10 songs (for example).
First try:
# First attempt: cap the model at 10 rows via the "limit-value" property.
# This fails at runtime — per the warning quoted below, "limit-value" is a
# construct-only GObject property on RhythmDBQueryModel, so it cannot be
# changed on a model that already exists.
self.shell.props.library_source.props.base_query_model.set_property("limit-value", GLib.Variant("n", 10))
for row in self.shell.props.library_source.props.base_query_model:
    print(row[0].get_string(RB.RhythmDBPropType.TITLE))
Result:
Warning: g_object_set_property: construct property "limit-value" for object 'RhythmDBQueryModel' can't be set after construction
self.shell.props.library_source.props.base_query_model.set_property("limit-value", GLib.Variant("n", 10))
Second try: since I do not know how to set the limit value, I instead tried building a full query filtered by GENRE:
# Second attempt: build a fresh, empty query model and run a parsed query
# filtered on GENRE == "Salsa", then print the matching artists.
db = self.shell.props.db
query_model = RB.RhythmDBQueryModel.new_empty(db)
# NOTE(review): GLib.PtrArray is generally not constructible/usable from
# Python through GObject introspection — this is the likely cause of the
# crash reported below; confirm against the Rhythmbox Python bindings.
query = GLib.PtrArray()
db.query_append_params(query, RB.RhythmDBQueryType.EQUALS, RB.RhythmDBPropType.GENRE, "Salsa")
db.do_full_query_parsed(query_model, query)
for row in query_model:
    print(row[0].get_string(RB.RhythmDBPropType.ARTIST))
Result:
Rhythmbox closed. Error detailed in: How do I query for data in Rhythmbox
Related
I am trying to select rows and fetch them from the DB table and then insert them into a list so I can insert all of the rows at once into the database, but I got an error.
def paid_or_returned_buyingchecks(self):
    """Copy today's due buying checks into ``paid_buying_checks``.

    Selects every row of ``cars_buying_checks`` whose ``nexttendays``
    column equals today's date (``YYYY-MM-DD``) and inserts those rows
    into ``paid_buying_checks``.

    Side effects:
        Sets ``self.con``, ``self.cursorObj``,
        ``self.dashboard_buying_checks_dates_output`` and
        ``self.tenlistchecks`` (the fetched rows), prints the rows,
        and commits the inserts.
    """
    now = datetime.now().strftime('%Y-%m-%d')
    self.con = sqlite3.connect('car dealership.db')
    self.cursorObj = self.con.cursor()
    self.dashboard_buying_checks_dates = self.cursorObj.execute(
        "select id, paymentdate , paymentvalue, car ,sellername "
        "from cars_buying_checks where nexttendays=?", (now,))
    self.dashboard_buying_checks_dates_output = self.cursorObj.fetchall()
    # fetchall() already returns a list of 5-tuples — exactly the shape
    # executemany() expects.  The original code appended that list into
    # another list and then wrapped it in [...] again, so executemany saw
    # ONE parameter (a list) instead of five per row, raising
    # "Incorrect number of bindings supplied ... uses 5, and there are 1".
    self.tenlistchecks = self.dashboard_buying_checks_dates_output
    print(self.tenlistchecks)
    self.cursorObj.executemany(
        "insert into paid_buying_checks VALUES(?,?,?,?,?)",
        self.tenlistchecks)
    self.con.commit()
but I got an error :
[[(120, '21-08-2022', '1112', 'Alfa Romeo', 'james'), (122, '21-08-2022', '465', 'Buick', 'daniel '), (123, '21-08-2022', '789', 'Buick', 'daniel ')]]
self.dashboard_buying_checks_dates = self.cursorObj.executemany(
sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement uses 5, and there are 1 supplied.
self.cursorObj.fetchall() returns a list of tuples, which is what you need to feed to executemany, so
self.cursorObj.executemany("insert into paid_buying_checks VALUES(?,?,?,?,?)",self.tenlistchecks)
not
self.cursorObj.executemany("insert into paid_buying_checks VALUES(?,?,?,?,?)",[self.tenlistchecks])
# Working example: query Neo4j for movies recommended to viewers of a
# given movie and print each recommendation title.
from neo4j import GraphDatabase, basic_auth

# Connect to the remote Neo4j instance.
driver = GraphDatabase.driver(
    "neo4j://34.201.9.108:7687",
    auth=basic_auth("neo4j", "chart-certifications-bottom"))

# Movies rated by the same users who rated $movie; each record exposes a
# single column named "recommendation".
cypher_query = '''
MATCH (m:Movie {title:$movie})<-[:RATED]-(u:User)-[:RATED]->(rec:Movie)
RETURN distinct rec.title AS recommendation LIMIT 20
'''

with driver.session(database="neo4j") as session:
    # .data() materializes the result into a list of dicts keyed by column.
    results = session.read_transaction(
        lambda tx: tx.run(cypher_query,
                          movie="Crimson Tide").data())
    for record in results:
        print(record['recommendation'])  # <----------------------- OK

driver.close()
I tried to modify the result before displaying it by storing it in a variable, but I get an error. The line print(record['recommendation']) became temp=record['recommendation']:
#app.get("/neo4j")  # NOTE(review): presumably the FastAPI decorator @app.get("/neo4j") — confirm
def graph_db():
    """Fetch up to 25 Person nodes from Neo4j and return them in a dict.

    Returns:
        dict: ``{"Result ": [...]}`` where the list holds the node bound
        to column ``n`` of each returned record.
    """
    driver = GraphDatabase.driver(
        "neo4j://34.201.9.108:7687",
        auth=basic_auth("neo4j", "chart-certifications-bottom"))
    # This query binds each Person node to the column name "n"; there is
    # no "recommendation" column here, so records must be read with
    # record['n'] — reading record['recommendation'] raised the KeyError.
    cypher_query = '''
MATCH (n:Person) RETURN n LIMIT 25
'''
    try:
        with driver.session(database="neo4j") as session:
            # No $movie parameter in this query, so none is passed.
            results = session.read_transaction(
                lambda tx: tx.run(cypher_query).data())
        # Collect each record's node under its actual column name.
        data = [record['n'] for record in results]
    finally:
        # Release the driver's connections even if the query fails.
        driver.close()
    return {"Result ": data}
When you replaced your query to
MATCH (n:Person) RETURN n LIMIT 25
You are returning up to 25 Person nodes, and the column recommendation does not exist.
temp=record['recommendation']
Make sure that the result set that your query is returning is the same with the column name that you are accessing.
I am trying to create a virtual table in HANA based on a remote system table view.
If I run it at the command line using hdbsql
hdbsql H00=> create virtual table HanaIndexTable at "SYSRDL#CG_SOURCE"."<NULL>"."dbo"."sysiqvindex"
0 rows affected (overall time 305.661 msec; server time 215.870 msec)
I am able to select from HanaIndexTable and get results and see my index.
When I code it in python, I use the following command:
# NOTE(review): the backslashes around <NULL> are sent through to the server
# (the traceback below shows '\\<NULL\\>'), which breaks the SQL. Based on
# the hdbsql session above, the remote-location parts likely need quoted
# identifiers and matching capitalization instead of escapes — confirm.
cursor.execute("""create virtual table HanaIndexTable1 at SYSRDL#CG_source.\<NULL\>.dbo.sysiqvindex""")
I think there is a problem with the NULL. But I see in the output that the escape character is doubled.
self = <hdbcli.dbapi.Cursor object at 0x7f02d61f43d0>
operation = 'create virtual table HanaIndexTable1 at SYSRDL#CG_source.\\<NULL\\>.dbo.sysiqvindex'
parameters = None
def __execute(self, operation, parameters = None):
# parameters is already checked as None or Tuple type.
> ret = self.__cursor.execute(operation, parameters=parameters, scrollable=self._scrollable)
E hdbcli.dbapi.ProgrammingError: (257, 'sql syntax error: incorrect syntax near "\\": line 1 col 58 (at pos 58)')
/usr/local/lib/python3.7/site-packages/hdbcli/dbapi.py:69: ProgrammingError
I have tried to run the command without the <> but get the following error.
hdbcli.dbapi.ProgrammingError: (257, 'sql syntax error: incorrect syntax near "NULL": line 1 col 58 (at pos 58)')
I have tried upper case, lower case and escaping. Is what I am trying to do impossible?
There was an issue with capitalization between HANA and my remote source. I also needed more escaping rather than less.
I have a map in firestore and I want to regularly update it (not overwrite previous keys). Most of the times it works, however sometimes it fails and it does not throw any exception. The only indication that something went wrong is that the result (https://cloud.google.com/firestore/docs/reference/rest/v1/WriteResult) has an update_time which I can compare to now() and if the difference is too large I know it did not do an update now. The problem is after that the whole map is missing (all previous keys are gone). So not only it failed to add the current keys but somehow it wiped out the whole field.
Below is the full code:
# Fragment (tail of a method — the enclosing ``def`` is not shown here):
# merge each key's entries into the ``myMap`` field of the matching
# Firestore document, collecting keys that failed into ``error_keys``.
error_keys = []
for key in data.keys():
    # continue if set is empty
    if not data[key]:
        continue
    try:
        # Dotted "myMap.<k>" field paths update individual map entries
        # without overwriting the rest of the map.
        new_keys = {
            f'myMap.{k}': v for k, v in data[key].items()}
        result = self.db.collection(u'myCollection').document(key).update(
            new_keys
        )
        now = datetime.now(tz=pytz.utc)
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S.%fZ")
        # Heuristic failure check: if the server-reported update_time is
        # much older than "now", assume the write did not happen now.
        duration = now - result.update_time  #
        duration_in_s = duration.total_seconds()
        minutes = divmod(duration_in_s, 60)[0]
        if minutes > 1.0:
            logger.warning("Diff to update time is larger that 1 min")
            logger.info(f'Now: {dt_string}')
            logger.info(f'Duration in minutes: {minutes}')
            logger.info(f'Adding {key} to error_keys')
            error_keys.append(key)
        logger.info(f'KEY: {key}: {data[key]} Update_time: {result.update_time} Diff_minutes: {minutes}')
    # NOTE(review): bare ``except:`` also swallows SystemExit and
    # KeyboardInterrupt — narrow it to ``except Exception:``.
    except:
        logger.warning(
            f'Could not key: {key} with data {data[key]} to firebase.')
        error_keys.append(key)
        logger.exception('Exception writing keys')
logger.info(f'ERROR_KEYS: {error_keys}')
return error_keys
I am using:
google-cloud-firestore 2.1.0
Python 3.7.3
Novice programmer here seeking help.
I already set up my code to my requirements to use the Twitter's premium API.
# Premium full-archive search for the listed cashtags over 2015-2018.
SEARCH_TERM = '#AAPL OR #FB OR #KO OR #ABT OR #PEPCO'
PRODUCT = 'fullarchive'
LABEL = 'my_label'
# Without maxResults the endpoint returns its default of 100 tweets
# per request (see the answer below).
r = api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
                {'query':SEARCH_TERM, 'fromDate':201501010000, 'toDate':201812310000})
However, when I run it I obtain the maximum number of tweets per search which is 500.
My question is should I add to the query maxResults = 500? And how do I use the next parameter to keep the code running until all the tweets that correspond to my query are obtained?
To up the results from the default of 100 to 500, yes, add maxResults to the query like this:
# Same request, with maxResults raised from the default 100 to the
# per-response maximum of 500.
r = api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
                {
                    'query':SEARCH_TERM,
                    'fromDate':201501010000, 'toDate':201812310000,
                    'maxResults':500
                })
You can make successive queries to get more results by using the next parameter. But, even easier, you can let TwitterAPI do this for you by using the TwitterPager class. Here is an example:
# Paginated version: TwitterPager follows the "next" token automatically,
# issuing successive requests until no more tweets are returned.
from TwitterAPI import TwitterAPI, TwitterPager

SEARCH_TERM = '#AAPL OR #FB OR #KO OR #ABT OR #PEPCO'
PRODUCT = 'fullarchive'
LABEL = 'my_label'

# Placeholders — substitute your own credentials.
api = TwitterAPI(<consumer key>,
                 <consumer secret>,
                 <access token key>,
                 <access token secret>)

pager = TwitterPager(api, 'tweets/search/%s/:%s' % (PRODUCT, LABEL),
                     {
                         'query':SEARCH_TERM,
                         'fromDate':201501010000, 'toDate':201812310000
                     })

# Items without a 'text' field (e.g. status messages) are printed raw.
for item in pager.get_iterator():
    print(item['text'] if 'text' in item else item)
This example will keep making successive requests with the next parameter until no tweets can be downloaded.
Use the count variable in a raw_query, for example:
# Standard (non-premium) search alternative: cap the number of results
# with the count parameter inside a raw query string.
results = api.GetSearch(
    raw_query="q=twitter%20&result_type=recent&since=2014-07-19&count=100")