Peewee-async - How to do a simple JOIN (or subquery / prefetch) - python-3.x

I'm stuck on a pretty simple issue with peewee-async regarding JOINs. Perhaps I need to use a subquery, or prefetch... I can't figure out what kind of query I need.
I have 2 database tables (parent/child):
class Group(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()

class Channel(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()
    group = peewee.ForeignKeyField(Group, backref="channels")
I need to fetch 1 group object, and this object has multiple channel objects.
I tried:
q = Group.select(Group, Channel).join(Channel)
But my backref 'channels' is always a ModelSelect query, not the actual result set.
Full code
import asyncio

import peewee
import peewee_async
from peewee_async import Manager, PooledPostgresqlDatabase

database = PooledPostgresqlDatabase('test', max_connections=4, user='postgres', password='', host='127.0.0.1')
objects = peewee_async.Manager(database)

class PeeweeModel(peewee.Model):
    class Meta:
        database = database

class Group(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()

class Channel(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()
    group = peewee.ForeignKeyField(Group, backref="channels")

Group.create_table()
Channel.create_table()
database.set_allow_sync(False)

async def handler():
    # create 1 group object
    group = await objects.create(Group, name="TestGroup")

    # create 2 channel objects, assign to group
    await objects.create(Channel, name="TestName1", group=group)
    await objects.create(Channel, name="TestName2", group=group)

    # Query 1 group, and hopefully it will have the channels
    q = Group.select(Group, Channel).join(Channel)
    results = await objects.execute(q)

    for result in results:
        print(result.channels)  # problem: channels is not a list of channel objects, but a `ModelSelect` instead

    with objects.allow_sync():
        Channel.drop_table(True)
        Group.drop_table(True)

loop = asyncio.get_event_loop()
loop.run_until_complete(handler())
loop.close()

I was able to get help from an expert™ and the solution is to use prefetch():
async def handler():
    # create 1 group object
    group = await objects.create(Group, name="TestGroup")

    # create 2 channel objects, assign to group
    await objects.create(Channel, name="TestName", group=group)
    await objects.create(Channel, name="TestName", group=group)

    # Query 1 group; prefetch() attaches the channels via the backref
    q = Group.select(Group)
    groups = await objects.prefetch(q, Channel.select(Channel))

    for group in groups:
        print(group, group.channels)  # channels is a list of channels.

    with objects.allow_sync():
        Channel.drop_table(True)
        Group.drop_table(True)
Peewee will figure out the relationship (backref) by itself.
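One extra note: the second argument to prefetch() is an ordinary query, so the prefetched children can themselves be filtered. A minimal sketch, assuming the same models and `objects` manager as above, inside the same async handler (the name filter is purely illustrative):

q = Group.select(Group)
# only channels whose name contains "Test" get attached to group.channels
channels = Channel.select(Channel).where(Channel.name.contains("Test"))
groups = await objects.prefetch(q, channels)
for group in groups:
    print(group.name, [channel.name for channel in group.channels])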

Related

Pyrogram raw method DeleteChatUser not working

This code:
usr = await app.resolve_peer(uid)
udata = InputUser(user_id=usr.user_id, access_hash=usr.access_hash)
r = await app.invoke(functions.messages.DeleteChatUser(chat_id=chan, user_id=udata))
print(r)
Returns:
AttributeError: 'InputPeerChannel' object has no attribute 'to_bytes'
In docs:
class pyrogram.raw.functions.messages.DeleteChatUser
Deletes a user from a chat and sends a service message on it.
Parameters:
    chat_id (int 64-bit) – Chat ID.
    user_id (InputUser) – User ID to be deleted.
    revoke_history (bool, optional) – Remove the entire chat history of the specified user in this chat.
What's wrong?
Maybe my udata is the wrong type?
I'm not sure, but DeleteChatUser seems to only work for groups, not channels.
For groups, this code may work:
cid = -10083757838484  # Example group_id
usr = await app.resolve_peer(uid)
if cid < 0:
    cid = cid * (-1)  # Removing the minus from the group_id
udata = InputUser(user_id=usr.user_id, access_hash=usr.access_hash)
r = await app.invoke(functions.messages.DeleteChatUser(chat_id=cid, user_id=udata))
print(r)
But I needed a solution for the channel, so I used:
r = await app.ban_chat_member(int(cid), int(usr))
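Worth noting: ban_chat_member removes the member but also leaves them banned, so they cannot re-join. If a plain kick is wanted, a follow-up unban works. A minimal sketch, assuming the same `app` client (the chat and user ids are placeholders):

# kick without a permanent ban: remove, then lift the ban so the user can re-join
await app.ban_chat_member(channel_id, user_id)    # removes the member
await app.unban_chat_member(channel_id, user_id)  # lifts the ban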

How to fetch data through multiple accounts with threading in python3

I want to write a function that fetches data in parallel.
The background: the information for 100 sites can be fetched from site A.
The same account can't be used more than once at a time, so I created 5 different accounts on site A, which enables me to fetch information with 5 accounts.
Account info looks like:
worker1 pawd
worker2 pawd
worker3 pawd
worker4 pawd
worker5 pawd
If you want to get the information for site B from site A, you type a command like get info for siteB_IP on site A.
Suppose there are 100 IPs stored in a list named IPlist.
How can I fetch the information for 100 IPs with the 5 available accounts in parallel using threading, and then store all of the information in one variable without conflicts?
What I have tried is below. The code cannot be executed, because I don't know how to complete the solution:
import threading

user = 'root'
pwd = 'Changeme123'

# the first step is to log on with the default account
rs = link.send_cmd(r':lognew:' + '"' + user + '","' + pwd + '"')

# then get all neighbor IPs from the logon site; parse_multi() parses the data
IPlist = parse_multi(link.send_cmd('get-IP-info:0xffff'))

def Fetchinfo(user, ip):
    rs = link.send_cmd(r':lognew:' + '"' + user + '","' + pwd + '"')
    areainfo = link.send_cmd('get info for ' + ip)

for ip in IPlist:
    # how to handle 100 IPs in the situation of 5 available accounts?
    thread = threading.Thread(target=Fetchinfo, args=[worker, ip])
Since you don't want calls from the same account id and password to happen concurrently, you can define a function that sequentially loops through a sub-list of IPs and fetches them synchronously:
def fetch_data_for_ips(account_id, account_password, ips_to_fetch):
    results = list()
    for ip_to_fetch in ips_to_fetch:
        # fetch with the account_id and password synchronously
        result = ...
        results.append(result)
    return results
Then use a thread pool to run the batches concurrently, one per account:
from concurrent.futures import ThreadPoolExecutor, as_completed

# Split the workload for each account to fetch
num, remainder = divmod(len(ip_list), len(accounts))
num_ips_for_each_account = num + bool(remainder)

# This gives e.g. [[1,2,3], [4,5,6]], where each sublist is for each account to fetch
ip_lists_for_each_account = [ip_list[i: i + num_ips_for_each_account] for i in range(0, len(ip_list), num_ips_for_each_account)]

# You should only need a number of threads equal to the number of accounts you have
with ThreadPoolExecutor(len(accounts)) as executor:
    # Feel free to use a set instead if you don't need to know which result came from which thread
    futures = dict()
    results = list()
    for (account_id, account_password), ips_to_fetch in zip(accounts, ip_lists_for_each_account):
        future = executor.submit(fetch_data_for_ips, account_id, account_password, ips_to_fetch)
        futures[future] = account_id
    for future in as_completed(futures):
        result = future.result()
        account_id = futures[future]
        print(f'{account_id} fetched these:', result)
        results.extend(result)
You can refer to the sample code below, as rcshon suggested.
def fetch_data_for_ips(account_id, ips_to_fetch):
    results = list()
    for ip_to_fetch in ips_to_fetch:
        # fetch with the account_id and password synchronously
        result = ','.join((account_id, ip_to_fetch))
        results.append(result)
    return results

from concurrent.futures import ThreadPoolExecutor, as_completed

accounts = ['worker1', 'worker2', 'worker3', 'worker4', 'worker5']
ip_list = [str(_) for _ in range(10)]

# Split the workload for each account to fetch
num, remainder = divmod(len(ip_list), len(accounts))
num_ips_for_each_account = num + bool(remainder)

# This gives e.g. [[1,2,3], [4,5,6]], where each sublist is for each account to fetch
ip_lists_for_each_account = [ip_list[i: i + num_ips_for_each_account] for i in range(0, len(ip_list), num_ips_for_each_account)]

# You should only need a number of threads equal to the number of accounts you have
with ThreadPoolExecutor(len(accounts)) as executor:
    # Feel free to use a set instead if you don't need to know which result came from which thread
    futures = dict()
    results = list()
    for account_id, ips_to_fetch in zip(accounts, ip_lists_for_each_account):
        future = executor.submit(fetch_data_for_ips, account_id, ips_to_fetch)
        futures[future] = account_id
    for future in as_completed(futures):
        result = future.result()
        account_id = futures[future]
        print(f'{account_id} fetched these:', result)
        results.extend(result)
Output:
worker3 fetched these: ['worker3,4', 'worker3,5']
worker2 fetched these: ['worker2,2', 'worker2,3']
worker1 fetched these: ['worker1,0', 'worker1,1']
worker4 fetched these: ['worker4,6', 'worker4,7']
worker5 fetched these: ['worker5,8', 'worker5,9']
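A design note on the fixed batches above: if some IPs are much slower than others, whole accounts can sit idle while one works through its sub-list. A shared queue avoids that by letting each account pull the next free IP as soon as it finishes one. A minimal sketch, reusing the dummy join-based fetch from the sample above in place of the real call:

import queue
import threading

accounts = ['worker1', 'worker2', 'worker3', 'worker4', 'worker5']
ip_list = [str(_) for _ in range(10)]

# every IP goes into one shared queue
ip_queue = queue.Queue()
for ip in ip_list:
    ip_queue.put(ip)

results = []
results_lock = threading.Lock()

def worker(account_id):
    # each account keeps pulling the next free IP until the queue is empty
    while True:
        try:
            ip = ip_queue.get_nowait()
        except queue.Empty:
            return
        result = ','.join((account_id, ip))  # stand-in for the real fetch
        with results_lock:
            results.append(result)

threads = [threading.Thread(target=worker, args=(account_id,)) for account_id in accounts]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)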

How to add multiple fields' reference to "unique_together" error message

I have a model with multiple fields being checked for uniqueness:
class AudQuestionList(BaseTimeStampModel):
    aud_ques_list_id = models.AutoField(primary_key=True,...
    aud_ques_list_num = models.CharField(max_length=26,...
    aud_ques_list_doc_type = models.ForeignKey(DocType,...
    short_text = models.CharField(max_length=55,...
    aud_scope_standards = models.ForeignKey(ScopeStandard, ...
    aud_freqency = models.ForeignKey(AuditFrequency, ...
    aud_process = models.ForeignKey(AuditProcesses, ...

    class Meta:
        unique_together = [['aud_scope_standards', 'aud_freqency', 'aud_process',],]
My model form is as described below:
class CreateAudQuestionListForm(forms.ModelForm):
    class Meta:
        model = AudQuestionList
        fields = ('aud_ques_list_doc_type', 'aud_scope_standards', 'aud_freqency', 'aud_process', 'short_text', ...

    def validate_unique(self):
        try:
            self.instance.validate_unique()
        except ValidationError:
            self._update_errors({'aud_scope_standards': _('Record exists for the combination of key values.')})
The scenario works perfectly well, except that the field names (labels) themselves are missing from the message.
Is there a way to add the field names to the message above, say something like:
Record exists for the combination of key fields + %(field_labels)s.
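One way to get the labels into the message is to read them off the form's own fields. A minimal sketch, assuming the same form as above (the message wording and the field tuple are illustrative, not a standard Django feature):

def validate_unique(self):
    try:
        self.instance.validate_unique()
    except ValidationError:
        # join the human-readable labels of the fields in the unique constraint
        unique_fields = ('aud_scope_standards', 'aud_freqency', 'aud_process')
        field_labels = ', '.join(str(self.fields[f].label or f) for f in unique_fields)
        self._update_errors({
            'aud_scope_standards': _('Record exists for the combination of key fields: %(field_labels)s.') % {'field_labels': field_labels},
        })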

How to get deleted users from channel (telethon)?

In Telegram, when I click Subscribers it shows me the last ~50 users and about 150-200 deleted users.
I tried this:
async for user in client.iter_participants(chat_id):
    if user.deleted:
        print(user)
This gives me only the last 50 users and 6-8 deleted ones. I need all 150-200 deleted users. How can I get them?
I solved this problem using GetParticipantsRequest with the offset parameter, something like this:
from telethon.tl.functions.channels import GetParticipantsRequest
from telethon.tl.types import ChannelParticipantsSearch

chat_id = -123456
offset = 0
while True:
    participants = await client(GetParticipantsRequest(
        channel=chat_id,
        filter=ChannelParticipantsSearch(''),
        offset=offset,
        limit=10000,
        hash=0
    ))
    if not participants.users:  # no more pages
        break
    deleted_users = []
    for user in participants.users:  # the users live in the result's `.users` attribute
        if user.deleted:
            deleted_users.append(user)
    offset += len(participants.users)  # advance the offset, otherwise the same page repeats
    # doings with deleted_users
Not sure about iter_participants, but get_participants works in my case.
channel_id = -1234567890  # TODO: add channel id
# synchronous usage (e.g. telethon.sync); prefix the calls with `await` in async code
users = client.get_participants(client.get_input_entity(channel_id))
for user in users:
    if user.deleted:
        print(user)

AWS Cognito 90 day automated Password rotation

I have a requirement to create an automated password reset script. I created a custom field in order to try and track this and also hope I can access some of the standard fields. This script should find users with the following criteria:
The latest of any of the following 3 dates that is >= 90 days ago: Sign_Up, Forgot_Password, or custom:pwdCreateDate.
I can't find any boto3 Cognito client way of getting this information, except for forgot-password events, which show up in admin_list_user_auth_events, and that response doesn't include the username. I suppose, since you provide the username to fetch the events, you could find the latest forgot-password event in the response and tie it back to the username.
Has anyone else implemented any boto3 automation to set the account to force password reset based on any of these fields?
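For the force-reset part specifically, boto3 exposes admin_reset_user_password, which puts the account into RESET_REQUIRED so the user must set a new password. A minimal sketch (the pool id and username are placeholders):

import boto3

idp = boto3.client('cognito-idp')

# forces a password reset for one user; pool id and username are placeholders
idp.admin_reset_user_password(
    UserPoolId='us-east-1_EXAMPLE',
    Username='example-user',
)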
Here is where I landed. Take it with the understanding that Cognito has some limitations which make truly flawless password rotation difficult. Also, make the script as efficient as you can, because in Lambda you will probably time out with more than about 350 users due to the 5 RPS limit on the admin API.
Prerequisites: set the Lambda function to a concurrency of 5 or you will exceed the 5 RPS limit; one mutable field in your Cognito user pool attributes to put a date in; a custom Lambda zip file, saved to S3, that includes pandas.
import os
import sys

# this adds the parent directory of bin so we can find the module
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parent_dir)

# This adds the venv lib/pythonX.Y/site-packages/ to the search path
mod_path = os.path.abspath(parent_dir+"/lib/python"+str(sys.version_info[0])+"."+str(sys.version_info[1])+"/site-packages/")
sys.path.append(mod_path)

import boto3
import datetime
import pandas as pd
import time

current_path = os.path.dirname(os.path.realpath(__file__))
# Use this one for the parent directory
ENV_ROOT = os.path.abspath(os.path.join(current_path, os.path.pardir))
# Use this one for the current directory
#ENV_ROOT = os.path.abspath(os.path.join(current_path))
sys.path.append(ENV_ROOT)

#if __name__ == "__main__":
def lambda_handler(event, context):
    user_pool_id = os.environ['USER_POOL_ID']
    idp_client = boto3.client('cognito-idp')
    users_list = []
    page_token = None
    dateToday = datetime.datetime.today().date()

    def update_user(user):
        # stamp the rotation date; admin_update_user_attributes has no
        # UserStatus parameter, so the forced reset is a separate call
        idp_client.admin_update_user_attributes(
            UserPoolId = user_pool_id,
            Username = user,
            UserAttributes = [
                {
                    'Name': 'custom:pwdCreateDate',
                    'Value': str(dateToday)
                }
            ]
        )
        # puts the account into RESET_REQUIRED
        idp_client.admin_reset_user_password(
            UserPoolId = user_pool_id,
            Username = user
        )

    users = idp_client.list_users(
        UserPoolId = user_pool_id
    )
    for user in users['Users']:
        users_list.append(user['Username'])
    # the last (or only) page has no PaginationToken, so use .get()
    page_token = users.get('PaginationToken')
    while page_token:
        users = idp_client.list_users(
            UserPoolId = user_pool_id,
            PaginationToken = page_token
        )
        for user in users['Users']:
            users_list.append(user['Username'])
        page_token = users.get('PaginationToken')

    attrPwdDates = []
    for i in range(len(users_list)):
        userAttributes = idp_client.admin_get_user(
            UserPoolId = user_pool_id,
            Username = users_list[i]
        )
        for a in userAttributes['UserAttributes']:
            if a['Name'] == 'custom:pwdCreateDate':
                # the format here must match however custom:pwdCreateDate was seeded
                attrPwdDates.append(datetime.datetime.strptime(a['Value'], '%Y-%m-%d %H:%M:%S.%f').date())
        time.sleep(1.0)  # stay under the 5 RPS admin API limit

    list_of_userattr_tuples = list(zip(users_list, attrPwdDates))
    df1 = pd.DataFrame(list_of_userattr_tuples, columns = ['Username','Password_Last_Set'])

    authPwdDates = []
    for i in range(len(users_list)):
        authEvents = idp_client.admin_list_user_auth_events(
            UserPoolId = user_pool_id,
            Username = users_list[i]
        )
        for event in authEvents['AuthEvents']:
            if event['EventType'] == 'ForgotPassword' and event['EventResponse'] == 'Pass':
                authPwdDates.append(event['CreationDate'].date())
                break
        time.sleep(1.0)

    list_of_userauth_tuples = list(zip(users_list, authPwdDates))
    df2 = pd.DataFrame(list_of_userauth_tuples, columns = ['Username','Password_Last_Forgot'])

    df3 = df1.merge(df2, how='left', on = 'Username')
    df3[['Password_Last_Set','Password_Last_Forgot']] = df3[['Password_Last_Set','Password_Last_Forgot']].apply(pd.to_datetime)

    cols = ['Password_Last_Set','Password_Last_Forgot']
    # usernames whose most recent password event is 90+ days old
    df4 = df3.loc[df3[cols].max(axis=1) <= pd.Timestamp.now() - pd.Timedelta(90, unit='d'), 'Username']
    for username in df4:  # df4 is a Series of usernames, so iterate it directly
        update_user(username)
