I am trying to make my discord bot send a message from a random date in the server's history but the around argument doesn't seem to work properly
def random_date():
d1 = date(year=2020, month=3,day=16)
d2= date.today()
delta = d2 - d1
int_delta = delta.days
random_number_of_days = random.randrange(int_delta)
ran = random_number_of_days
global day
day = d1 + timedelta(ran)
# en = d1 + timedelta(ran + 1)
return day
#commands.command(name='quote')
async def quote(self,message):
day = random_date()
messages = await message.channel.history(around=day).flatten()
msg = random.choice(messages)
embed = discord.Embed(description=f'{msg.content}' , color=message.author.color, timestamp=msg.created_at)
embed.set_author(name=f"{msg.author.name}#{msg.author.discriminator}", icon_url=msg.author.avatar_url)
embed.add_field(name = 'Source' , value = f"[Jump]({msg.jump_url})")
embed.set_footer(text=f"Found in : #{message.channel.name}")
await message.channel.send(embed=embed)
See https://discordpy.readthedocs.io/en/latest/api.html?highlight=channel%20history#discord.TextChannel.history
You need to use datetime.datetime()
Also see: https://docs.python.org/3/library/datetime.html#datetime.datetime if you need help with that
Related
I want to generate auto generate book fine of 10% of book cost. I have written the following code but nothing happens. No error comes and not working. book_cost field is in book module.
Please check code.
issue_date = fields.Date('Issue Date', required=True, tracking=True)
due_date = fields.Date('Due Date', required=True, tracking=True)
book_ids = fields.Many2many('odooschool.library.books','tch_book_rel','book_name','teacher_id','Issued Books')
sequence = fields.Integer('sequence')
fine_amount = fields.Char('Fine Amount', compute='_get_cost_details')
submission_date = fields.Date.today()
price = fields.Char('Price')
#api.depends('due_date','book_ids.book_cost')
def _get_cost_details(self):
market_multiplier = 0
date_return = fields.Date()
for rec in self:
fine_amount = 0
if rec.due_date and rec.submission_date and rec.due_date > rec.submission_date:
date_return = (rec.due_date - rec.submission_date)
market_multiplier = int(decimal.Decimal('0.10'))
fine_amount = rec.book_ids.book_cost * market_multiplier
rec.fine_amount += rec.fine_amount
I think if you replace
submission_date = fields.Date.today()
by
submission_date = fields.Date(default= fields.Date.today)
That will be work. Cause the submission_date in your code is always the starting date of Odoo server.
Regards
I'm doing a web scraping data university research project. I started working on a ready GitHub project, but this project does not retrieve all the data.
The project works like this:
Search Google using keywords: example: (accountant 'email me at' Google)
Extract a snippet.
Retrieve data from this snippet.
The issue is:
The snippets extracted are like this: " ... marketing division in 2009. For more information on career opportunities with our company, email me: vicki#productivedentist.com. Neighborhood Smiles, LLCĀ ..."
The snippet does not show all, the "..." hides information like role, location... How can I retrieve all the information with the script?
from googleapiclient.discovery import build #For using Google Custom Search Engine API
import datetime as dt #Importing system date for the naming of the output file.
import sys
from xlwt import Workbook #For working on xls file.
import re #For email search using regex.
if __name__ == '__main__':
# Create an output file name in the format "srch_res_yyyyMMdd_hhmmss.xls in output folder"
now_sfx = dt.datetime.now().strftime('%Y%m%d_%H%M%S')
output_dir = './output/'
output_fname = output_dir + 'srch_res_' + now_sfx + '.xls'
search_term = sys.argv[1]
num_requests = int(sys.argv[2])
my_api_key = "replace_with_you_api_key" #Read readme.md to know how to get you api key.
my_cse_id = "011658049436509675749:gkuaxghjf5u" #Google CSE which searches possible LinkedIn profile according to query.
service = build("customsearch", "v1", developerKey=my_api_key)
wb=Workbook()
sheet1 = wb.add_sheet(search_term[0:15])
wb.save(output_fname)
sheet1.write(0,0,'Name')
sheet1.write(0,1,'Profile Link')
sheet1.write(0,2,'Snippet')
sheet1.write(0,3,'Present Organisation')
sheet1.write(0,4,'Location')
sheet1.write(0,5,'Role')
sheet1.write(0,6,'Email')
sheet1.col(0).width = 256 * 20
sheet1.col(1).width = 256 * 50
sheet1.col(2).width = 256 * 100
sheet1.col(3).width = 256 * 20
sheet1.col(4).width = 256 * 20
sheet1.col(5).width = 256 * 50
sheet1.col(6).width = 256 * 50
wb.save(output_fname)
row = 1 #To insert the data in the next row.
#Function to perform google search.
def google_search(search_term, cse_id, start_val, **kwargs):
res = service.cse().list(q=search_term, cx=cse_id, start=start_val, **kwargs).execute()
return res
for i in range(0, num_requests):
# This is the offset from the beginning to start getting the results from
start_val = 1 + (i * 10)
# Make an HTTP request object
results = google_search(search_term,
my_cse_id,
start_val,
num=10 #num value can be 1 to 10. It will give the no. of results.
)
for profile in range (0, 10):
snippet = results['items'][profile]['snippet']
myList = [item for item in snippet.split('\n')]
newSnippet = ' '.join(myList)
contain = re.search(r'[\w\.-]+#[\w\.-]+', newSnippet)
if contain is not None:
title = results['items'][profile]['title']
link = results['items'][profile]['link']
org = "-NA-"
location = "-NA-"
role = "-NA-"
if 'person' in results['items'][profile]['pagemap']:
if 'org' in results['items'][profile]['pagemap']['person'][0]:
org = results['items'][profile]['pagemap']['person'][0]['org']
if 'location' in results['items'][profile]['pagemap']['person'][0]:
location = results['items'][profile]['pagemap']['person'][0]['location']
if 'role' in results['items'][profile]['pagemap']['person'][0]:
role = results['items'][profile]['pagemap']['person'][0]['role']
print(title[:-23])
sheet1.write(row,0,title[:-23])
sheet1.write(row,1,link)
sheet1.write(row,2,newSnippet)
sheet1.write(row,3,org)
sheet1.write(row,4,location)
sheet1.write(row,5,role)
sheet1.write(row,6,contain[0])
print('Wrote {} search result(s)...'.format(row))
wb.save(output_fname)
row = row + 1
print('Output file "{}" written.'.format(output_fname))
I am working on face recognition system for my academic project. I want to set the first time an employee was recognized as his first active time and the next time he is being recognized should be recorded as his last active time and then calculate the total active hours based on first active and last active time.
I tried with the following code but I'm getting only the current system time as the start time. can someone help me on what I am doing wrong.
Code:
data = pickle.loads(open(args["encodings"], "rb").read())
vs = VideoStream(src=0).start()
writers = None
time.sleep(2.0)
while True:
frame = vs.read()
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
rgb = imutils.resize(frame, width=750)
r = frame.shape[1] / float(rgb.shape[1])
boxes = face_recognition.face_locations(rgb)
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
face_names = []
for encoding in encodings:
matches = face_recognition.compare_faces(data["encodings"],
encoding)
name = "Unknown"
if True in matches:
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
name = max(counts, key=counts.get)
names.append(name)
if names != []:
for i in names:
first_active_time = datetime.now().strftime('%H:%M')
last_active_time = datetime.now().strftime('%H:%M')
difference = datetime.strptime(first_active_time, '%H:%M') - datetime.strptime(last_active_time, '%H:%M')
difference = difference.total_seconds()
total_hours = time.strftime("%H:%M", time.gmtime(difference))
face_names.append([i, first_active_time, last_active_time, total_hours])
I am trying to get this for_log coroutine to work, what im trying to do is delete all messages from a specific month, in this case August, I went around the Internet and documentation, and also with the help from another question I posted here and came up with this:
#Client.command(pass_context = True)
async def clear(ctx, number: int, month, year):
def around_month(month, year):
begin = datetime.strptime(f'1 {month} {year}', '%d %b %Y')
if begin.month == 12:
end = datetime(begin.year+1, 1, 1)
else:
end = datetime(begin.year, begin.month, 1)
return begin, end
if ctx.message.author.id not in AdminIDs:
await Client.send_message(ctx.message.channel, 'You do not have permission to use this command')
return
counter = 0
begin, end = around_month(month, year)
tmsg = await Client.send_message(ctx.message.channel, 'Progress: 0/' + str(number))
async for x in Client.logs_from(ctx.message.channel, limit = number, after=begin, before=end):
counter += 1
print(counter)
await Client.edit_message(tmsg, 'Progress:' + counter + '/' + str(number))
await Client.delete_messages(x)
await asyncio.sleep(1.5)
await Client.send_message(ctx.message.channel, 'Operation completed! ' + 'Cleared: ' + str(counter) + ' items')
and then use !fclear 100 AUG 2018
This looks fine at first glance, but for some reason, it does not delete any messages, I inserted a counter to see if the for loop actually goes through the counter, and it gets nothing, it's still zero, interestingly enough, I tried to print the counter while it was inside the for loop, and it didn't print it to the console, the only reason I could think for that to happen is if it doesn't go to the for loop at all which may be because it doesn't find any messages? I'm not sure. There are no errors in the console or anywhere else
It gets nothing
This is a correct deduction. Working back from this, you'll find that around_month returns a tuple of the same date.
begin, end = around_month(month, year)
begin == end # True
Taken from the Discord docs 1,
The before, after, and around keys are mutually exclusive, only one may be passed at a time.
Given your need to delete messages from a specific month, you will need to fetch messages in that month until you exhaust the list of messages created in that month.
Also you need to invoke Client.logs_from with the snowflake time. There is a handy utility function for converting datetime object to a time snowflake. 2
from discord.utils import time_snowflake
from datetime import time delta
# ...
def before_time(messages, before):
return [
message for message in messages
if message.id < before
]
after_dt = datetime.strptime(f'1 {month} {year}', '%d %b %Y')
before_dt = (after_dt + timedelta(days=31)).replace(day=1)
after = time_snowflake(after_dt)
before = time_snowflake(before_dt)
messages = await Client.logs_from(
ctx.message.channel, limit=number, after=after
)
marked_for_deletion = before_time(messages, before)
while marked_for_deletion:
await Client.delete_messages(marked_for_deletion)
messages = await Client.logs_from(
ctx.message.channel, limit=number, after=after
)
marked_for_deletion = before_time(messages, before)
await asyncio.sleep(1.5)
I can not get the above information using github.api. Reading the documentation did not help much. There is still no complete understanding of the work with dates. Here is an example of my code for getting open issues:
import requests
import json
from datetime import datetime
username = '\'
password = '\'
another_page = True
opened = 0
closed = 0
api_oldest = 'https://api.github.com/repos/grpc/grpc/issues?
per_page=5&q=sort=created:>`date -v-14d "+%Y-%m-%d"`&order=asc'
api_issue = 'https://api.github.com/repos/grpc/grpc/issues?
page=1&per_page=5000'
api_pulls = 'https://api.github.com/repos/grpc/grpc/pulls?page=1'
datetime.now()
while another_page:
r = requests.get(api_issue, auth=(username, password))
#json_response = json.loads(r.text)
#results.append(json_response)
if 'next' in r.links:
api_issue = r.links['next']['url']
if item['state'] == 'open':
opened += 1
else:
closed += 1
else:
another_page=False
datetime.now()
print(opened)
There are a few issues with your code. For example, what does item represent ?. Your code can be modified as follows to iterate and get the number of open issues .
import requests
username = '/'
password = '/'
another_page = True
opened = 0
closed = 0
api_issue = "https://api.github.com/repos/grpc/grpc/issues?page=1&per_page=5000"
while another_page:
r = requests.get(api_issue, auth=(username, password))
json_response = r.json()
#results.append(json_response)
for item in json_response:
if item['state'] == 'open':
opened += 1
else:
closed += 1
if 'next' in r.links:
api_issue = r.links['next']['url']
else:
another_page=False
print(opened)
If you want issues that were created in the last 14 days, you could make the api request using the following URL.
api_oldest = "https://api.github.com/repos/grpc/grpc/issues?q=sort=created:>`date -d '14 days ago'`&order=asc"