Python: reminder notification not popping up - python-3.x

I am working on a notification program that reads an Excel file as you edit it and sends a notification at the time you set for each reminder.
The code is here:
import pandas as pd
import datetime
from plyer import notification

def remind(title, message):
    notification.notify(title=title, message=message, timeout=10)

if __name__ == '__main__':
    while True:
        df = pd.read_excel("./reminder.xlsx")
        for index, item in df.iterrows():
            time = datetime.datetime.now().strftime("%H:%M:%S")
            if time == item['Time']:
                remind(item['Title'], item['Message'])
            else:
                continue
The program runs fine, but the reminder never pops up.
I am working with Python 3.7.9 on Windows 8 (64-bit).

from numpy import nan
import pandas as pd
import datetime
from plyer import notification
import time

def notifyMe(title, message, icon):
    notification.notify(
        title=title,
        message=message,
        app_icon=icon,
        timeout=10)

if __name__ == '__main__':
    notifyMe("Hello", "We are in together in these tough time", "virusicon.ico")
    df = pd.read_excel("reminder.xlsx")
    df["Icon"] = df["Icon"].astype(str)
    df["Heading"] = df["Heading"].astype(str)
    df["Message"] = df["Message"].astype(str)
    number_of_rows = len(df.index)
    while True:
        flag = True
        for item in range(0, number_of_rows):
            now = datetime.datetime.strptime(datetime.datetime.now().strftime("%H:%M:%S"), "%H:%M:%S")
            x = datetime.datetime.strptime(df['Time'][item].strftime("%H:%M:%S"), "%H:%M:%S")
            if x == now:
                print(True)
                print(df['Heading'][item], df['Message'][item], df["Icon"][item])
                # replace string "nan" cells with empty strings before notifying
                if df["Icon"][item] == "nan":
                    df["Icon"][item] = ""
                if df["Message"][item] == "nan":
                    df["Message"][item] = ""
                if df["Heading"][item] == "nan":
                    df["Heading"][item] = ""
                notifyMe(df['Heading'][item], df['Message'][item], df['Icon'][item])
            time.sleep(1)
            if now <= x:
                flag = False
        if flag:
            exit()
The xlsx file uses the columns referenced above (Time, Heading, Message, Icon).
You can try this; it may help you.
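For what it's worth, a likely reason the original version never fires (an assumption, since the spreadsheet itself isn't shown) is that pandas reads the Time column as datetime.time objects, so comparing them against a formatted string never matches. A minimal sketch that normalises each cell before comparing, using the same Time/Title/Message columns as the question:

import datetime
import time

import pandas as pd
from plyer import notification

def remind(title, message):
    notification.notify(title=title, message=message, timeout=10)

if __name__ == '__main__':
    while True:
        df = pd.read_excel("reminder.xlsx")  # re-read so edits are picked up
        now = datetime.datetime.now().time().replace(microsecond=0)
        for _, item in df.iterrows():
            cell = item['Time']
            # Normalise the cell to datetime.time whether pandas parsed it as
            # a time object or left it as a "HH:MM:SS" string.
            t = cell if isinstance(cell, datetime.time) else \
                datetime.datetime.strptime(str(cell), "%H:%M:%S").time()
            if t == now:
                remind(item['Title'], item['Message'])
        time.sleep(1)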

Related

Matplotlib, Tkinter, Serial Multiprocessing

I created a program (using Tkinter, Python 3 and matplotlib) that reads data from different serial ports, saves it to a CSV file, creates graphs, and finally previews the data in a GUI. The code was split into two scripts: the main script handled reading data, saving it to CSV, and previewing the data, and the other script handled graph creation.
Today I rewrote the code using the answer of #user2464430 here. The code works, but I can't update the GUI: it opens once and then never refreshes with new data.
The following code is part of the total code.
My code is:
from PIL import ImageTk, Image
import tkinter as Tk
import multiprocessing
from queue import Empty, Full
from time import strftime
import serial
import numpy as np
import matplotlib.pyplot as plt
from drawnow import *
from pylab import *
import pandas as pd
from datetime import timedelta
from datetime import datetime
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import locale
import os

class GuiApp(object):
    def __init__(self, image):
        self.root = Tk.Tk()
        self.root.resizable(width=False, height=False)
        self.root.geometry("1600x800+0+0")
        C = Canvas(self.root, bg="black", width=1600, height=800)

        def BasicLabels():
            .......  # in this stage create multiple axis labels
            ΥAxisLabels()
        BasicLabels()

        def ValueLabels():
            .......  # read and manipulate data from the CSV file and print it in labels
        ValueLabels()

        C.pack()

def GenerateData(q):  # Read serial ports and store data to the CSV file
    file_exists = os.path.isfile("BigData.csv")
    header = [["Daytime,T1"]]
    if not file_exists:
        with open("BigData.csv", "a+") as csvfile:
            np.savetxt(csvfile, header, delimiter=",", fmt="%s", comments="")
    while True:
        try:
            ser1 = serial.Serial(port="COM4", baudrate=9600)
            read_ser1 = ser1.readline()
            if read_ser1 == "":
                read_ser1 = "Missing Value"
            else:
                read_ser1 = ser1.readline()
                read_ser1 = str(read_ser1[0:len(read_ser1)].decode("utf-8"))
            # print("COM4:", read_ser1)
            ser1.close()
        except:
            print("Failed 1")
            read_ser1 = "9999,9999,9999,9999,9999"
        daytime = strftime(" %d-%m-%Y %H:%M:%S")
        rows = [daytime + "," + read_ser1.strip()]
        with open("BigData.csv", "a+") as csvfile:
            np.savetxt(csvfile, rows, delimiter=",", fmt="%s", comments="")
        CreateGraphs()

def CreateGraphs():
    # Code to generate graphs. Called every time there is a new line in the CSV.

if __name__ == "__main__":
    # Queue which will be used for storing data
    q = multiprocessing.Queue()
    q.cancel_join_thread()  # or else the thread that puts data will not terminate
    gui = GuiApp(q)
    t1 = multiprocessing.Process(target=GenerateData, args=(q,))
    t1.start()
    gui.root.mainloop()
    t1.join()
The graphs are generated after the while True in GenerateData.
All data for the labels and graphs comes from the CSV file, not directly from the serial port.
Is it possible to update the GUI with the latest data from the CSV and the created graphs?
Thanks for your time.
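To frame the question, the usual Tkinter pattern for refreshing a GUI with data produced by another process is to poll a multiprocessing.Queue (or re-read the CSV) with root.after() from inside the Tk main loop, since widgets may only be touched from the GUI process. A minimal sketch, assuming a StringVar-backed label and a 1-second refresh; the producer, label and interval are illustrative stand-ins, not the original program:

import multiprocessing
import time
import tkinter as Tk
from queue import Empty

def producer(q):
    # Stand-in for GenerateData: push the latest value onto the queue
    # instead of (or as well as) writing it to the CSV.
    i = 0
    while True:
        q.put(f"T1 = {i}")
        i += 1
        time.sleep(1)

class GuiApp:
    def __init__(self, q):
        self.q = q
        self.root = Tk.Tk()
        self.text = Tk.StringVar(value="waiting...")
        Tk.Label(self.root, textvariable=self.text, font=("Arial", 24)).pack()
        self.root.after(1000, self.refresh)    # schedule the first poll

    def refresh(self):
        try:
            while True:                        # drain everything queued so far
                latest = self.q.get_nowait()
                self.text.set(latest)
        except Empty:
            pass
        self.root.after(1000, self.refresh)    # re-schedule, keeps the GUI live

if __name__ == "__main__":
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=producer, args=(q,), daemon=True)
    p.start()
    gui = GuiApp(q)
    gui.root.mainloop()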

Recording EEG data using Python and BrainFlow and saving it to a CSV file; I am unable to add a timestamp and channel-name headers to the CSV file

The EEG device is OpenBCI and the board is Cyton + Daisy. My sample code is here:
from datetime import datetime
import argparse
import numpy as np
import pandas as pd
import csv
import mne
from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds
from brainflow.data_filter import DataFilter
import time

def main():
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board = BoardShim(BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session()
    board.start_stream()
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
    BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep(10)
    data = board.get_board_data()
    board.stop_stream()
    board.release_session()

    eeg_channels = BoardShim.get_eeg_channels(BoardIds.SYNTHETIC_BOARD.value)
    # demo for downsampling, it just aggregates data
    for count, channel in enumerate(eeg_channels):
        print('Original data for channel %d:' % channel)
        print(data[channel])
        print("Current Time =", current_time)

    # demo how to convert it to pandas DF and plot data
    # eeg_channels = BoardShim.get_eeg_channels(BoardIds.SYNTHETIC_BOARD.value)
    # df = pd.DataFrame(np.transpose(data))
    # print('Data From the Board')
    # print(df.head(10))

    # demo for data serialization using brainflow API, we recommend to use it instead
    pandas.to_csv()
    DataFilter.write_file(data, 'test.csv', 'w')  # use 'a' for append mode
    # writing the data in csv file
    restored_data = DataFilter.read_file('test.csv')
    restored_df = pd.DataFrame(np.transpose(restored_data))
    print('Data From the File')
    print(restored_df.head(5))

if __name__ == "__main__":
    main()
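A sketch of one way to get channel names and a timestamp column into the CSV, using pandas instead of DataFilter.write_file. It assumes BrainFlow's get_eeg_names and get_timestamp_channel helpers behave as documented for the chosen board, and the output file name is illustrative; it would be called right after data = board.get_board_data():

import numpy as np
import pandas as pd
from brainflow.board_shim import BoardShim, BoardIds

def save_labelled_csv(data, board_id=BoardIds.SYNTHETIC_BOARD.value,
                      path='eeg_labelled.csv'):
    """Write the EEG rows of a BrainFlow data array to CSV, with channel names
    as column headers and the board timestamp as the first column."""
    eeg_channels = BoardShim.get_eeg_channels(board_id)
    eeg_names = BoardShim.get_eeg_names(board_id)            # e.g. ['Fp1', 'Fp2', ...]
    ts_channel = BoardShim.get_timestamp_channel(board_id)   # row holding unix timestamps

    df = pd.DataFrame(np.transpose(data[eeg_channels]), columns=eeg_names)
    df.insert(0, 'timestamp', data[ts_channel])
    df.to_csv(path, index=False)
    return df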

Multiprocessing script doesn't finish all tasks, and I also get 100% CPU?

I need to ask whether part of my script is correct. It seems to work, but I think something is really wrong, because CPU usage stays at 100% and the script often doesn't finish all the tasks: after 50-100 tasks it appears frozen.
Any idea how to fix it, or can you tell me where the error is?
Thank you.
P.S. I have included all the modules the script requires, but only the part that should matter for multiprocessing, and only the first part of the script.
Many thanks.
from __future__ import print_function
import sys
import os
import easygui
import pyautogui as py
import datetime
import pwinput
import json
from collections import Counter
import random
import string
import threading
import subprocess
import multiprocessing
import queue
from multiprocessing import cpu_count
from multiprocessing import Value, Lock, Process, Queue, current_process
import numpy as np
import grequests
import requests
from requests.exceptions import ConnectionError
from requests.exceptions import HTTPError
import time
from time import sleep

number_of_processes = cpu_count()

class Counter(object):
    def __init__(self, initval=0):
        self.val = Value('i', initval)
        self.lock = Lock()

    def increment(self):
        with self.lock:
            self.val.value += 1

    def value(self):
        with self.lock:
            return self.val.value

def updateTitle(number_of_processes, number_of_task, counterhits, counterdone, countersl, countml, username):
    while True:
        hits = int(counterhits.value())
        done = int(counterdone.value())
        shtot = int(countersl.value())
        maitot = int(countml.value())
        remain_scan = number_of_task - hits
        elapsed = time.strftime('%H:%M:%S', time.gmtime(time.time() - start))
        ctypes.windll.kernel32.SetConsoleTitleW(f'Site Valid For: {number_of_task} | Started: {hits} | Complete: {done} | Remain: {remain_scan} | SL Found: {shtot} | ML Found: {maitot} | Threads: {number_of_processes} | Time elapsed: {elapsed} ! Licensed at: {username}')
        sleep(0.3)

def worker_main(tasks_to_do, tasks_finished, counterhits, counterdone, countersl, countml):
    while True:
        try:
            site = tasks_to_do.get_nowait()
            if site is None:
                break
        except Queue.Empty:
            break
        except Queue.Full:
            sleep(0.5)
            continue
        counterhits.increment()
        do_work(site, counterhits, counterdone, countersl, countml)
        tasks_finished.put(site + current_process().name)
        counterdone.increment()
    return True

def main():
    global username
    number_of_task = int(len(filter_data))
    counterhits = Counter(0)
    counterdone = Counter(0)
    countersl = Counter(0)
    countml = Counter(0)
    tasks_to_do = Queue()
    tasks_finished = Queue()
    processes1 = []
    prefix = ['http://']

    # creating processes
    for w in range(number_of_processes):
        p1 = Process(target=worker_main, args=(tasks_to_do, tasks_finished, counterhits, counterdone, countersl, countml))
        processes1.append(p1)
        p1.start()
    procs = [Process(target=updateTitle, args=(number_of_processes, number_of_task, counterhits, counterdone, countersl, countml, username), daemon=True) for i in range(1)]
    for p in procs:
        p.start()

    for site_il in filter_data:
        site_or = site_il.rstrip("\n")
        if site_or.startswith("http://"):
            site_or = site_or.replace("http://", "")
        elif site_or.startswith("https://"):
            site_or = site_or.replace("https://", "")
        site_or = site_or.rstrip()
        site_or = site_or.split('/')[0]
        if 'www.' in site_or:
            site_or = site_or.replace("www.", "")
        sitexx = [sub + site_or for sub in prefix]
        for site in sitexx:
            tasks_to_do.put(site)

    # completing process
    for p1 in processes1:
        p1.join()
    for p in procs:
        p.join()

    # print the output
    while not tasks_finished.empty():
        print(tasks_finished.get())
    os.system('pause>nul')
    return True

if __name__ == '__main__':
    if sys.platform.startswith('win'):
        # On Windows calling this function is necessary.
        multiprocessing.freeze_support()
    main()
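As a point of comparison (a sketch, not a drop-in for the full script): get_nowait() on an empty multiprocessing queue raises queue.Empty, not Queue.Empty, and polling it in a tight loop is what typically pins the CPU at 100% and lets workers die before the real tasks arrive. A blocking worker that stops on a None sentinel avoids both problems; do_work here is a stand-in for the real function:

from multiprocessing import Process, Queue, cpu_count, current_process

def do_work(site):
    # stand-in for the asker's do_work()
    print(current_process().name, "processing", site)

def worker_main(tasks_to_do, tasks_finished):
    while True:
        site = tasks_to_do.get()        # block instead of spinning: ~0% CPU while idle
        if site is None:                # sentinel tells this worker to stop
            break
        do_work(site)
        tasks_finished.put(site + " " + current_process().name)

if __name__ == '__main__':
    tasks_to_do, tasks_finished = Queue(), Queue()
    workers = [Process(target=worker_main, args=(tasks_to_do, tasks_finished))
               for _ in range(cpu_count())]
    for w in workers:
        w.start()
    for site in ["http://example.com", "http://example.org"]:
        tasks_to_do.put(site)
    for _ in workers:                   # one sentinel per worker so every join returns
        tasks_to_do.put(None)
    for w in workers:
        w.join()
    while not tasks_finished.empty():
        print(tasks_finished.get())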

Stock Research Terminal

import requests
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime

def income_statement(stock):
    number_yrs = input('how many yr(s)?').strip()
    api_key = 'd5abd8f1620e04709eeb05ebafa9af7e'
    IS = requests.get(f'https://financialmodelingprep.com/api/v3/balance-sheet-statement/{stock}?limit={number_yrs}&apikey={api_key}').json()
    IS = pd.DataFrame.from_dict(IS)
    IS = IS.T
    print(IS.T)
    save_to_csv = input('save_to_csv? y or n').strip()
    if save_to_csv == 'y':
        IS.to_csv('IS' + stock + '.csv')

while True:
    command = input('stock?')
    stock = comman.split(' ')[1]
    if comman == 'IS ' + stock:
        income_statement(stock)
    elif comman == 'quit':
        break
    else:
        print('Invalid Command.')
I am learning Python for finance right now. I would like to build a stock research terminal that lets me pull the income statement, balance sheet, historical prices, etc. of a stock from the Internet. I used the code above, but when I type IS TSLA it still gives me the income statement of AAPL. Which parts of the code are wrong? Thanks!
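Assuming the paste mirrors the real script, two things stand out: the input is stored in command but the loop then uses comman, and the request goes to the balance-sheet-statement endpoint even though the function is called income_statement (switching the URL to the provider's income-statement endpoint would be needed to actually return one; that endpoint name is assumed from the provider's naming pattern, not verified here). A sketch of the command loop with consistent names:

# Sketch of the command loop with consistent variable names.
while True:
    command = input('stock? ').strip()
    if command == 'quit':
        break
    parts = command.split(' ')
    if len(parts) == 2 and parts[0] == 'IS':
        income_statement(parts[1])        # e.g. "IS TSLA" -> income_statement("TSLA")
    else:
        print('Invalid Command.')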

Capture the terminal output of a Python module in real time

The Python 'yfinance' module downloads the quotes of many financial securities into a pandas dataframe and, in the meantime, displays a progress bar in the console, like this:
import yfinance as yf
Tickerlist = ["AAPL","GOOG","MSFT"]
quote = yf.download(tickers=Tickerlist,period='max',interval='1d',group_by='ticker')
I would like to capture the console progress bar in real time, and I thought the code should be something like this:
import sys
import subprocess
process = subprocess.Popen(["yf.download","tickers=Tickerlist","period='max'","interval='1d'","group_by='ticker'"],stdout=quote)
while True:
    out = process.stdout.read(1)
    sys.stdout.write(out)
    sys.stdout.flush()
I am making a big mess with subprocess. I need your help! Thanks.
I have already looked at all the links that deal with this topic, but I could not solve my problem.
You need two Python files to do what you want: one is yf_download.py and the second is run.py.
The code looks like this, and you run it with run.py:
python run.py
yf_download.py
import sys
import yfinance as yf

Tickerlist = ["AAPL", "GOOG", "MSFT"]

def run(period):
    yf.download(tickers=Tickerlist, period=period, interval='1d', group_by='ticker')

if __name__ == '__main__':
    period = sys.argv[1]
    run(period)
run.py
import sys
import subprocess

process = subprocess.Popen(["python", "yf_download.py", "max"], stdout=subprocess.PIPE)
while True:
    out = process.stdout.read(1)
    if process.poll() is not None:
        break
    if out != b'':                  # the subprocess pipe yields bytes, not str
        sys.stdout.buffer.write(out)
        sys.stdout.flush()
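One caveat, offered as an assumption rather than something verified here: if yfinance writes its progress bar to stderr instead of stdout, it will not appear through stdout=subprocess.PIPE alone; merging stderr into the same pipe captures it as well:

# Merge stderr into the captured stream in case the progress bar is printed there
# (assumption; depends on how yfinance renders its progress output).
process = subprocess.Popen(
    ["python", "yf_download.py", "max"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)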
