Cannot find the process workflow - helloworld - in the workflow list - django-viewflow

Hi, I am new to Django and I am trying to run the sample process from http://docs.viewflow.io/viewflow_quickstart.html, but I cannot find the process in the process list; the list comes up empty.
flow.py
from viewflow import flow
from viewflow.base import this, Flow
from viewflow.flow.views import CreateProcessView, UpdateProcessView
from .models import HelloWorldProcess
from viewflow import frontend


@frontend.register
class HelloWorldFlow(Flow):
    process_class = HelloWorldProcess

    start = (
        flow.Start(
            CreateProcessView,
            fields=["text"]
        ).Permission(
            auto_create=True
        ).Next(this.approve)
    )

    approve = (
        flow.View(
            UpdateProcessView,
            fields=["approved"]
        ).Permission(
            auto_create=True
        ).Next(this.check_approve)
    )

    check_approve = (
        flow.If(lambda activation: activation.process.approved)
        .Then(this.send)
        .Else(this.end)
    )

    send = (
        flow.Handler(
            this.send_hello_world_request
        ).Next(this.end)
    )

    end = flow.End()

    def send_hello_world_request(self, activation):
        print(activation.process.text)
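For the flow to show up in the built-in frontend, the quickstart also relies on the frontend being wired into the project itself. A rough sketch of the pieces involved, written from memory of the viewflow 1.x quickstart (app and module names are assumptions here, so double-check them against the quickstart page):

# settings.py - the frontend apps must be installed for @frontend.register to take effect
INSTALLED_APPS = [
    # ... the usual django.contrib apps ...
    'material',
    'material.frontend',
    'viewflow',
    'viewflow.frontend',
    'helloworld',          # the quickstart's app
]

# urls.py - the frontend URLs are included at the project root
from django.conf.urls import include, url
from viewflow import frontend

urlpatterns = [
    url(r'^', include(frontend.urls)),
]

Also worth checking: the quickstart names the flow module flows.py, and if I remember correctly the frontend autodiscovers flows from modules with that name, so a file named flow.py may never get imported and the @frontend.register line would never run.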

Related

Not able to retrieve tick data - Zerodha Web Socket

I am using the below code to connect to the Web Socket API of Zerodha to pull tick data information for a particular instrument that I am interested in. When I run the below piece of code, I am not able to pull any information. I am not sure whether I am calling the functions in the Streaming_Ticks class properly.
The instrument token, which is the input, is placed in "parameter_file.csv", and this token needs to be passed to the on_connect callback function inside the Streaming_Ticks class.
Would welcome your comments on how to run this code correctly.
from kiteconnect import KiteConnect
from kiteconnect import KiteTicker
import os
import csv

#cwd = os.chdir("E:\\Algorthmic Trading\\Zerodha_Training")

class Streaming_Ticks:
    def __init__(self):
        access_token = open("access_token.txt", 'r').read()
        key_secret = open("key_info.txt", 'r').read().split()
        self.kite = KiteConnect(api_key=key_secret[0])
        self.kite.set_access_token(access_token)
        self.kws = KiteTicker(key_secret[0], self.kite.access_token)

    def on_ticks(ws, ticks):
        # Callback to receive ticks.
        # logging.debug("Ticks: {}".format(ticks))
        print(ticks)

    def on_connect(ws, response):
        # Callback on successful connect.
        # Subscribe to a list of instrument_tokens (RELIANCE and ACC here).
        # logging.debug("on connect: {}".format(response))
        print(token_list)
        ws.subscribe(token_list)
        ws.set_mode(ws.MODE_FULL, token_list)     # Set all token ticks in `full` mode.
        # ws.set_mode(ws.MODE_FULL, [tokens[0]])  # Set one token tick in `full` mode.


if __name__ == "__main__":
    cwd = os.chdir("E:\\Algorthmic Trading\\Zerodha_Training")

    tick_data = Streaming_Ticks()
    token_list = []
    with open('parameter_file.csv') as param_file:
        param_reader = csv.DictReader(param_file)
        for row in param_reader:
            token_list.append(int(row['token']))

    tick_data.on_ticks = tick_data.on_ticks
    tick_data.on_connect = tick_data.on_connect
    tick_data.kws.connect()
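For comparison, the usage shown in the kiteconnect (pykiteconnect) examples attaches the callbacks to the KiteTicker instance itself before calling connect(). A minimal sketch along those lines (token value and file handling are illustrative, not taken from the code above):

from kiteconnect import KiteTicker

api_key = open("key_info.txt").read().split()[0]      # assumed file layout
access_token = open("access_token.txt").read().strip()

kws = KiteTicker(api_key, access_token)
token_list = [738561]   # example instrument token; in practice read from parameter_file.csv

def on_ticks(ws, ticks):
    # Called for every batch of ticks received.
    print(ticks)

def on_connect(ws, response):
    # Called once the websocket is connected; subscribe here.
    ws.subscribe(token_list)
    ws.set_mode(ws.MODE_FULL, token_list)

# The callbacks have to be attached to the KiteTicker object that actually connects.
kws.on_ticks = on_ticks
kws.on_connect = on_connect
kws.connect()   # blocking call that runs the websocket event loop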

How to update the values of a pymodbus TCP server from messages subscribed via ZeroMQ?

I am a newbie. In my current project, when the front end decides to start the Modbus service, I create a separate process for that service. The values are obtained in the parent process and passed along via ZeroMQ PUB/SUB; I now want to update the Modbus register values inside the Modbus service process.
I tried the approach from the updating_server.py example provided by pymodbus, using twisted.internet.task.LoopingCall() to update the register values, but this makes it impossible for the client to connect to my server, and I don't know why.
Here was the log when the server was set up with LoopingCall() and the client connected.
Then I tried to put both the updating coroutine and StartTcpServer in the async loop, but the update was only entered once, right after startup, and never again.
Currently I am using LoopingCall() to handle the updates, but I don't think this is a good approach.
This is the code where I initialize the PUB socket and the readers for all the tags:
from loop import cycle
import asyncio
from multiprocessing import Process
from persistence import models as pmodels
from persistence import service as pservice
from persistence import basic as pbasic
import zmq
from zmq.asyncio import Context
from common import logging
from server.modbustcp import i3ot_tcp as sertcp
import common.config as cfg
import communication.admin as ca
import json
import os
import signal
from datetime import datetime
from server.opcuaserver import i3ot_opc as seropc

async def main():
    future = []
    task = []
    global readers, readers_old, task_flag
    logger.debug("connecting to database and create table.")
    pmodels.connect_create()
    logger.debug("init read all address to create loop task.")
    cycle.init_readers(readers)
    ctx = Context()
    publisher = ctx.socket(zmq.PUB)
    logger.debug("init publish [%s].", addrs)
    publisher.bind(addrs)
    readers_old = readers.copy()
    for reader in readers:
        task.append(asyncio.ensure_future(
            cycle.run_readers(readers[reader], publisher)))
    if not len(task):
        task_flag = True
    logger.debug("task length [%s - %s].", len(task), task)
    opcua_server = LocalServer(seropc.opc_server, "opcua")
    future = [
        start_get_all_address(),
        start_api(),
        create_address_loop(publisher, task),
        modbus_server(),
        opcua_server.run()
    ]
    logger.debug("run loop...")
    await asyncio.gather(*future)

asyncio.run(main(), debug=False)
This is the code that reads the device tag values and publishes them:
async def run_readers(reader, publisher):
    while True:
        await reader.run(publisher)

class DataReader:
    def __init__(self, freq, clients):
        self._addresses = []
        self._frequency = freq
        self._stop_signal = False
        self._clients = clients
        self.signature = sign_data_reader(self._addresses)

    async def run(self, publisher):
        while not self._stop_signal:
            for addr in self._addresses:
                await addr.read()
                data = {
                    "type": "value",
                    "data": addr._final_value
                }
                publisher.send_pyobj(data)
                if addr._status:
                    if addr.alarm_log:
                        return_alarm_log = pbasic.get_log_by_time(addr.alarm_log['date'])
                        if return_alarm_log:
                            data = {
                                "type": "alarm",
                                "data": return_alarm_log
                            }
                            publisher.send_pyobj(data)
                    self.data_send(addr)
                    logger.debug("run send data")
            await asyncio.sleep(int(self._frequency))

    def stop(self):
        self._stop_signal = True
Modbus server imports and server code:
from common import logging
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from persistence import service as pservice
from persistence import basic as pbasic
import zmq
import common.config as cfg
import struct
import os
import signal
from datetime import datetime
from twisted.internet.task import LoopingCall

def updating_writer(a):
    logger.info("in updates of modbus tcp server.")
    context = a[0]
    # while True:
    if check_pid(os.getppid()) is False:
        os.kill(os.getpid(), signal.SIGKILL)
    url = "ipc://{}".format(cfg.get('ipc', 'pubsub'))
    logger.debug("connecting to [%s].", url)
    ctx = zmq.Context()
    subscriber = ctx.socket(zmq.SUB)
    subscriber.connect(url)
    subscriber.setsockopt(zmq.SUBSCRIBE, b"")
    slave_id = 0x00
    msg = subscriber.recv_pyobj()
    logger.debug("updates.")
    if msg['data']['data_type'] in modbus_server_type and msg['type'] == 'value':
        addr = pservice.get_mbaddress_to_write_value(msg['data']['id'])
        if addr:
            logger.debug(
                "local address and length [%s - %s].",
                addr['local_address'], addr['length'])
            values = get_value_by_type(msg['data']['data_type'], msg['data']['final'])
            logger.debug("modbus server updates values [%s].", values)
            register = get_register(addr['type'])
            logger.debug(
                "register [%d] local address [%d] and value [%s].",
                register, addr['local_address'], values)
            context[slave_id].setValues(register, addr['local_address'], values)
    # time.sleep(1)

def tcp_server(pid):
    logger.info("Get server configure and device's tags.")
    st = datetime.now()
    data = get_servie_and_all_tags()
    if data:
        logger.debug("register address space.")
        register_address_space(data)
    else:
        logger.debug("no data to create address space.")
    length = register_number()
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [0] * length),
        co=ModbusSequentialDataBlock(0, [0] * length),
        hr=ModbusSequentialDataBlock(0, [0] * length),
        ir=ModbusSequentialDataBlock(0, [0] * length)
    )
    context = ModbusServerContext(slaves=store, single=True)
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
    identity.ProductName = 'pymodbus Server'
    identity.ModelName = 'pymodbus Server'
    identity.MajorMinorRevision = '2.2.0'
    # ----------------------------------------------------------------------- #
    # set loop call and run server
    # ----------------------------------------------------------------------- #
    try:
        logger.debug("thread start.")
        loop = LoopingCall(updating_writer, (context, ))
        loop.start(1, now=False)
        # process = Process(target=updating_writer, args=(context, os.getpid(),))
        # process.start()
        address = (data['tcp_ip'], int(data['tcp_port']))
        nt = datetime.now() - st
        logger.info("modbus tcp server begin has used [%s] s.", nt.seconds)
        pservice.write_server_status_by_type('modbus', 'running')
        StartTcpServer(context, identity=identity, address=address)
    except Exception as e:
        logger.debug("modbus server start error [%s].", e)
        pservice.write_server_status_by_type('modbus', 'closed')
This is the code I created for the modbus process.
def process_stop(p_to_stop):
    global ptcp_flag
    pid = p_to_stop.pid
    os.kill(pid, signal.SIGKILL)
    logger.debug("process has closed.")
    ptcp_flag = False

def ptcp_create():
    global ptcp_flag
    pid = os.getpid()
    logger.debug("sentry pid [%s].", pid)
    ptcp = Process(target=sertcp.tcp_server, args=(pid,))
    ptcp_flag = True
    return ptcp

async def modbus_server():
    logger.debug("get modbus server's status.")
    global ptcp_flag
    name = 'modbus'
    while True:
        ser = pservice.get_server_status_by_name(name)
        if ser['enabled']:
            if ser['tcp_status'] == 'closed' or ser['tcp_status'] == 'running':
                tags = pbasic.get_tag_by_name(name)
                if len(tags):
                    if ptcp_flag is False:
                        logger.debug("[%s] status [%s].", ser['tcp_name'], ptcp_flag)
                        ptcp = ptcp_create()
                        ptcp.start()
                    else:
                        logger.debug("modbus server is running ...")
                else:
                    logger.debug("no address to create [%s] server.", ser['tcp_name'])
                    pservice.write_server_status_by_type(name, "closed")
            else:
                logger.debug("[%s] server is running ...", name)
        else:
            if ptcp_flag:
                process_stop(ptcp)
                logger.debug("[%s] has been closed.", ser['tcp_name'])
                pservice.write_server_status_by_type(name, "closed")
            logger.debug("[%s] server not allowed to run.", name)
        await asyncio.sleep(5)
This is the command that Docker runs.
/usr/bin/docker run --privileged --network host --name scout-sentry -v /etc/scout.cfg:/etc/scout.cfg -v /var/run:/var/run -v /sys:/sys -v /dev/mem:/dev/mem -v /var/lib/scout:/data --rm shulian/scout-sentry
This is the Docker configuration file /etc/scout.cfg.
[scout]
mode=product
[logging]
level=DEBUG
[db]
path=/data
[ipc]
cs=/var/run/scout-cs.sock
pubsub=/var/run/pubsub.sock
I want the Modbus value update function to be triggered whenever a message arrives from ZeroMQ, so that the register is updated correctly.
Let's start from the inside out.
Q : ...this will make it impossible for me to connect to my server with the client. I don't know why?
ZeroMQ is a smart, broker-less messaging / signaling middleware, or better, a platform for smart messaging. In case one is not yet familiar with the art of the Zen-of-Zero present in the ZeroMQ architecture, one may like to start with "ZeroMQ Principles in less than Five Seconds" before diving into further details.
The Basis :
The Scalable Formal Communication Archetype borrowed here, the ZeroMQ PUB/SUB, does not come at zero cost.
This means that each infrastructure setup (both on the PUB-side and on the SUB-side) takes some rather remarkable time, and no one can be sure when the AccessNode configuration actually reaches an RTO (ready-to-operate) state. So the SUB-side (as proposed above) ought to be either a permanent entity, or the user should not expect it to be RTO in zero time every time a twisted.internet.task.LoopingCall() gets reinstated.
Preferred way: instantiate your (semi-)persistent zmq.Context() once, configure it so as to serve the <aContextInstance>.socket( zmq.PUB ) as needed, with a minimum safeguarding setup being <aSocketInstance>.setsockopt( zmq.LINGER, 0 ), plus all the transport / queuing / security-handling details that the exosystem exposes to your code (whitelisting, safe sizing and resource protection being the most probable candidates, but the details depend on your application domain and on the risks you are willing to face and are prepared to handle).
ZeroMQ strongly discourages sharing (zero-sharing) <aContextInstance>.socket()-instances, yet the zmq.Context()-instance can be shared / re-used (ref. ZeroMQ Principles...) / passed to more than one thread (if needed).
All <aSocketInstance>{.bind()|.connect()}-methods are expensive, so try to set up the infrastructure AccessPoint(s) and their due error handling well before trying to use the communication services they mediate.
Each <aSocketInstance>.setsockopt( zmq.SUBSCRIBE, ... ) is expensive, in that it may take (depending on the local/remote version) the form of a non-local, distributed behaviour: the local side "sets" the subscription, yet the remote side has to "be informed" about the state change and "implement" the operation in line with the actual (propagated) state. While in earlier versions all messages were dispatched from the PUB-side and every SUB-side was flooded with the data and left to "filter" it into a local internal queue, the newer versions "implement" the topic filter on the PUB-side, which further increases the latency of putting the new modus operandi into action.
Next comes the modus-operandi: how <aSocketInstance>.recv() gets results:
In their default API-state, .recv()-methods are blocking, potentially infinitely blocking, if no messages arrive.
Solution: avoid blocking forms of calling the ZeroMQ <aSocket>.recv()-methods by always using their zmq.NOBLOCK modes, or rather test for the presence or absence of any expected message(s) with the <aSocket>.poll( zmq.POLLIN, <timeout> )-methods, with zero or controlled timeouts. This makes you the master who decides about the flow of code execution. Not doing so, you knowingly let your code depend on an external sequence (or absence) of events, and your architecture is prone to awful problems with handling infinite blocking states (or potentially unsalvageable live-locks or dead-locks in many-agents' distributed behaviour).
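For illustration only (not the poster's code): a minimal sketch of a SUB-side that is created once and then drained with a controlled, non-blocking poll from whatever periodic callback drives the server, assuming the same ipc:// endpoint and pickled dict payloads as in the question:

import zmq

# One-time setup: keep the Context and the SUB socket (semi-)persistent;
# do not re-create them inside every periodic update call.
ctx = zmq.Context()
subscriber = ctx.socket(zmq.SUB)
subscriber.setsockopt(zmq.LINGER, 0)
subscriber.setsockopt(zmq.SUBSCRIBE, b"")
subscriber.connect("ipc:///var/run/pubsub.sock")   # endpoint taken from the config shown above

def drain_updates(context, slave_id=0x00):
    """Periodic callback (e.g. driven by LoopingCall): handle pending messages without blocking."""
    while subscriber.poll(timeout=0, flags=zmq.POLLIN):   # 0 ms -> returns immediately if nothing is queued
        msg = subscriber.recv_pyobj(zmq.NOBLOCK)
        if msg.get('type') == 'value':
            # ... look up the register address for msg['data'] and write it,
            # e.g. context[slave_id].setValues(register, local_address, values)
            pass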
Avoid uncontrolled cross-breeding of event loops - like passing ZeroMQ-driven loops into an external "callback"-like handler or async-decorated code blocks, where the stack of (non-)blocking logics may wreak havoc on the original idea, just by throwing the system into an unresolvable state where events miss their expected sequence, live-locks are unsalvageable, or only the first pass happens to go through.
Stacking asyncio code with twisted LoopingCall()-s and async/await-decorated code plus ZeroMQ blocking .recv()-s is either a piece of filigree-precise art of a true Zen-master, or a sure ticket to hell - with all respect to the art of the true Zen-masters :o)
So, yes, complex thinking is needed -- welcome to the realms of distributed-computing!

wxpython can't import files in same directory

I wanted to make a simple GUI with wxPython (in Python 3.6.1) because I didn't want to use the command line for this project anymore.
I have the logic in a big separate file that is also used by other, non-wxPython files.
But when I try to import my logic class I get this error:
ImportError: cannot import name 'QuestionAsk'
I use this line to import the logic class:
from Get import QuestionAsk
The file "Get.py" is in the same directory as the gui file.
Here
But it dosen't work why and how can I import this file ?
By the way I call the gui.py file from the get.py file and import the get.py file from the Asker.py file.
ps : Pleas try to not answer :" just copy the content of that class into your gui file" because i use this class elsewhere if there is no other solution then that's ok but that's just ugly and unefficient.
Edit
Here is the code from the Get file that starts the GUI:
def graphical_start():
    app = wx.App(False)    # init app
    frame = GuiVocCard()   # set frame (GuiVocCard is in the main GUI file)
    frame.Show()           # show frame
    app.MainLoop()         # execute loop
And here is the start of the main GUI class "GuiVocCard":
class GuiVocCard(wx.Frame):
    def __init__(self):
        self.language = "es"
        self.transList = "C:\\Users\\Justus\\Desktop\\Schule\\spa\VocabGeter\\translations\\big_translation.json"
        self.verb_forms = [0, 2]
        self.High_Score = 0
        self.s_file = "scores.json"
        self.S_chunk = 40
        self.chunk_file = "chunks.json"
        self.load_config()
        self.q_ask = QuestionAsk(lan=self.language, trans=self.transList, verb_forms=self.verb_forms,
                                 scores_file=self.s_file, chunks_file=self.chunk_file, chunk_size=self.S_chunk)
        ...
Edit 2 :
The QuestionAsk is defined here in the Get.py file:
class QuestionAsk:
    def __init__(self, lan="es",
                 trans="C:\\Users\\Justus\\Desktop\\Schule\\spa\VocabGeter\\translations\\translation.json",
                 verb_forms=[0, 2], scores_file="scores.json", chunks_file="chunks.json", chunk_size=40):
        self.language = lan
        self.transList = trans
        self.verb_forms = verb_forms
        self.High_Score = 0
        self.s_file = scores_file
        self.S_chunk = chunk_size
        self.chunk_file = chunks_file
Fixed it (yay):
I removed the graphical_start function from the Get.py file and pasted it into the gui.py file. I now import the gui.py file directly from the Asker.py file.
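To make the fix concrete, here is a minimal sketch of the layout the answer describes (file and class names are taken from the question, the rest is illustrative):

# Get.py - only the logic, no GUI imports
class QuestionAsk:
    def __init__(self, lan="es", **kwargs):
        self.language = lan
        # ... rest of the logic ...

# gui.py - imports the logic and now also owns the wx startup code
import wx
from Get import QuestionAsk

class GuiVocCard(wx.Frame):
    def __init__(self):
        super().__init__(None, title="VocabGeter")
        self.q_ask = QuestionAsk()

def graphical_start():
    app = wx.App(False)
    frame = GuiVocCard()
    frame.Show()
    app.MainLoop()

# Asker.py - entry point; imports gui.py, which in turn imports Get.py (no cycle)
# from gui import graphical_start
# graphical_start()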

JIRA Groovy Script - Check on duplicate sub-tasks

The code below creates 3 sub-tasks for each version selected in the SW Version custom field. It is placed on a workflow transition as a post-function and works perfectly. However, if the workflow transition is revisited, it creates duplicate subtasks for the already-selected versions.
Example:
SW Version field has 5 options:
1, 2, 3, 4, 5
If a user selects 1 and 2 and proceeds with the transition, it will create 6 subtasks, 3 for each version selected. (All good here.)
If the user then updates the SW Version field to 1, 2, 3, and 4 (with 1 and 2 already selected), it will create 12 additional subtasks, making the total 18, with 6 duplicates for 1 and 2.
I want to put a check on these duplicates, so the basic logic is: if the subtask already exists, go to the next one. Something like: for each version selected, check whether the subtasks already exist and proceed. I tried doing it various ways and failed. I have commented out some code below that gets the summary of the subtasks and compares it with all existing subtask summaries, but it doesn't work.
In addition, I tried doing this, to no avail:
if(issue.getSubTaskObjects()*.summary.equals(summaryText)){
    log.info("Subtask already exists")
    return;
}
Any help would be greatly appreciated !
import com.atlassian.jira.component.ComponentAccessor
import com.atlassian.jira.issue.IssueManager
import com.atlassian.jira.issue.Issue
import com.atlassian.jira.util.ImportUtils
import com.atlassian.jira.issue.CustomFieldManager
import com.atlassian.jira.issue.MutableIssue
import com.atlassian.jira.issue.index.IssueIndexManager
import com.atlassian.jira.issue.link.IssueLinkManager
import com.atlassian.jira.issue.index.IssueIndexingService
import org.apache.log4j.Logger
import org.apache.log4j.Level

log.info("Processing: " + issue.key);

CustomFieldManager customFieldManager = ComponentAccessor.customFieldManager
IssueManager issueManager = ComponentAccessor.getIssueManager();

def cfM119 = customFieldManager.getCustomFieldObjectByName("SW Version")
log.info("cfM119: " + cfM119)
def m119VersionArray = issue.getCustomFieldValue(cfM119) as String[]

def reqAssignee = 'user1'
def swAssignee = 'user2'
def testAssignee = 'user3'

//collecting subtask object
//Collection allsubtasks = issue.getSubTaskObjects()
//for(Issue allsubtask: allsubtasks) {
//    def subtaskSummary = allsubtask.getSummary() as String[]
//    log.info("Subtask Summary" + subtaskSummary)
//    if (subtaskSummary[]){
//        log.info("Subtask already exists")}
//    else {

m119VersionArray.each{ version ->
    createSubTask("", version, "_Approved_REQ", reqAssignee)
    createSubTask("", version, "_Approved_SW", swAssignee)
    createSubTask("", version, "_Approved_TEST", testAssignee)
}

def createSubTask(String component, version, type, String assignee) {
    def Long issueLinkType = new Long(10702)
    def Long sequence = new Long(1)
    //Issue issue
    def summaryText = component + version + " " + type

    def issueManager = ComponentAccessor.issueManager
    def issueFactory = ComponentAccessor.issueFactory
    def subTaskManager = ComponentAccessor.subTaskManager
    def issueLinkManager = ComponentAccessor.issueLinkManager
    def userManager = ComponentAccessor.userManager
    def authenticationContext = ComponentAccessor.jiraAuthenticationContext

    if(issue.getSubTaskObjects()*.summary.equals(summaryText)){
        log.info("Subtask already exists")
        return;
    }

    // Defining subtask
    def newIssue = issueFactory.getIssue()
    newIssue.setIssueTypeId("5")
    newIssue.setParentId(issue.getId())
    newIssue.setProjectObject(issue.getProjectObject())
    newIssue.setSummary(summaryText)
    newIssue.setAssignee(userManager.getUserByName(assignee))
    newIssue.setDescription(issue.getDescription())

    log.info("Creating subtask - " + summaryText)
    def subTask = issueManager.createIssueObject(authenticationContext.getLoggedInUser(), newIssue)
    subTaskManager.createSubTaskIssueLink(issue, subTask, authenticationContext.getLoggedInUser())
    issueLinkManager.createIssueLink(issue.getId(), newIssue.getId(), issueLinkType, sequence, authenticationContext.getLoggedInUser())

    // reindex
    ImportUtils.setIndexIssues(true)
    IssueIndexingService issueIndexService =
        ComponentAccessor.getComponent(IssueIndexingService.class)
    issueIndexService.reIndex(subTask)
    ImportUtils.setIndexIssues(false)
}
It's not working simply because issue.getSubTaskObjects()*.summary returns a list, and you then try to compare that list with a string in .equals(summaryText). But you were close: if you change this part to issue.getSubTaskObjects()*.summary.contains(summaryText) (meaning you look for summaryText among the list elements), it will work pretty well.
Also, in my opinion, the following code is more understandable and clear: issue.getSubTaskObjects().find{it.getSummary() == summaryText} (this will also work inside the if statement).

How to get celery task ids

I set up a periodic task using celery beat. The task runs and I can see the result in the console.
I want to have a Python script that collects the results produced by the tasks.
I could do it like this:
#client.py
from cfg_celery import app

task_id = '337fef7e-68a6-47b3-a16f-1015be50b0bc'
try:
    x = app.AsyncResult(task_id)
    print(x.get())
except:
    print('some error')
Anyway, as you can see, for this test I had to copy the task_id printed at the celery beat console (so to speak) and hard-code it in my script. Obviously this is not going to work in real production.
I hacked around it by setting the task_id in the celery config file:
#cfg_celery.py
from celery import Celery

app = Celery('celery_config',
             broker='redis://localhost:6379/0',
             include=['taskos'],
             backend='redis'
             )
app.conf.beat_schedule = {
    'something': {
        'task': 'tasks.add',
        'schedule': 10.0,
        'args': (16, 54),
        'options': {'task_id': "my_custom_id"},
    }
}
This way I can read it like this:
#client.py
from cfg_celery import app

task_id = 'my_custom_id'
try:
    x = app.AsyncResult(task_id)
    print(x.get())
except:
    print('some error')
The problem with this approach is that I lose the previous results (previous to the call of client.py).
Is there some way I can read a list of the task_id's in the celery backend?
If I have more than one periodic tasks, can I get a list of task_id's from each periodic task?
Can I use app.tasks.key() to accomplish this, and if so, how?
PS: I am not a native English speaker and I am new to celery, so be nice if I got some terminology wrong.
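For what it's worth, with the Redis result backend celery keeps each result under a key of the form celery-task-meta-<task_id>, so one way to list finished task ids from another process is to scan those keys. A rough sketch (assuming the same redis://localhost:6379/0 backend as above and results that have not expired yet):

import json
from redis import Redis

rds = Redis(host='localhost', port=6379, db=0)

# The Redis result backend stores one key per finished task: celery-task-meta-<task_id>
task_ids = []
for key in rds.scan_iter(match='celery-task-meta-*'):
    task_ids.append(key.decode().replace('celery-task-meta-', ''))
    meta = json.loads(rds.get(key))          # contains task_id, status, result, date_done, ...
    print(meta['task_id'], meta['status'], meta['result'])

print('finished task ids:', task_ids)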
OK. I am not sure if nobody answered this because it is difficult or because my question is too dumb.
Anyway, what I wanted to do is to get the results of my celery-beat tasks from another Python process.
From within the same process there was no problem: I could access the task id and everything was easy from there on. But from another process I didn't find a way to retrieve a list of the finished tasks.
I tried python-RQ (it is nice), but when I saw that I couldn't do that with RQ either, I came to understand that I had to make use of redis' storage capabilities manually. So I got what I wanted by doing this:
- Use 'bind=True' to be able to introspect from within the task function.
- Once I have the result of the function, I write it to a list in redis (with a small trick to limit the size of this list).
- Now I can, from an independent process, connect to the same redis server and retrieve the results stored in that list.
My files ended up being like this:
cfg_celery.py : here I define the way the tasks are going to be called.
#cfg_celery.py
from celery import Celery

appo = Celery('celery_config',
              broker='redis://localhost:6379/0',
              include=['taskos'],
              backend='redis'
              )

'''
urlea is decorated as a periodic_task, so there is no need to register it here.
But since add needs args, I register it manually in order to pass them in.
'''
appo.conf.beat_schedule = {
    'q_loco': {
        'task': 'taskos.add',
        'schedule': 10.0,
        'args': (16, 54),
        # 'options' : {'task_id': "lcura"},
    }
}
taskos.py : these are the tasks.
#taskos.py
from cfg_celery import appo
from celery.decorators import periodic_task
from redis import Redis
from datetime import timedelta
import requests, time

rds = Redis()

@appo.task(bind=True)
def add(self, a, b):
    # result of the operation. very dummy.
    result = a + b
    # storing in redis
    r = (self.request.id, time.time(), result)
    rds.lpush('my_results', r)
    # for this test I want to have at most 5 results stored in redis
    long = rds.llen('my_results')
    while long > 5:
        x = rds.rpop('my_results')
        print('popping out', x)
        long = rds.llen('my_results')
    time.sleep(1)
    return a + b

@periodic_task(run_every=20)
def urlea(url='https://www.fullstackpython.com/'):
    inicio = time.time()
    R = dict()
    try:
        resp = requests.get(url)
        R['vato'] = url + " = " + str(resp.status_code * 10)
        R['num palabras'] = len(resp.text.split())
    except:
        R['vato'] = None
        R['num palabras'] = 0
    print('u {} : {}'.format(url, time.time() - inicio))
    time.sleep(0.8)  # trick so the time difference is easier to see.
    return R
consumer.py : the independent process that can get the results.
#consumer.py
from redis import Redis

nombre_lista = 'my_results'
rds = Redis()
tamaño = rds.llen(nombre_lista)
ultimos_resultados = list()
for i in range(tamaño):
    ultimos_resultados.append(rds.rpop(nombre_lista))
print(ultimos_resultados)
I am relatively new to programming and I hope this answer can help noobs like me. If I got something wrong, feel free to make the corrections as necessary.
