SimPy synchronous communication channel

I'm a newbie starting to use Python and SimPy. I would like to have a synchronous communication channel between two processes. For example, I would like to have:
import simpy

channel = ...

def writer(env):
    for i in range(2):
        yield env.timeout(0.75)
        yield channel.put(i)
        print("produced {} at time {}".format(i, env.now))

def reader(env):
    while True:
        yield env.timeout(1.2)
        i = yield channel.get()
        print("consumed {} at time {}".format(i, env.now))

env = simpy.Environment()
env.process(writer(env))
env.process(reader(env))
env.run()
It should give as a result:
produced 0 at time 1.2
consumed 0 at time 1.2
produced 1 at time 2.4
consumed 1 at time 2.4
What should I use for the definition of channel?
If I use a Store, then I get (slightly different from above):
import simpy

env = simpy.Environment()
channel = simpy.Store(env)

def writer():
    for i in range(2):
        yield env.timeout(0.75)
        yield channel.put(i)
        print("produced {} at time {}".format(i, env.now))

def reader():
    while True:
        yield env.timeout(1.2)
        i = yield channel.get()
        print("consumed {} at time {}".format(i, env.now))

env.process(writer())
env.process(reader())
env.run()
and the output would be:
produced 0 at time 0.75
consumed 0 at time 1.2
produced 1 at time 1.5
consumed 1 at time 2.4
But I want the output shown above: the writer should wait until the reader is ready to read.

What you want is not directly possible with the built-in resources. A workaround might be the following:
import collections

import simpy

Message = collections.namedtuple('Message', 'received, value')

def writer(env, channel):
    for i in range(2):
        yield env.timeout(0.75)
        msg = Message(env.event(), i)
        yield channel.put(msg)
        yield msg.received
        print("produced {} at time {}".format(i, env.now))

def reader(env, channel):
    while True:
        yield env.timeout(1.2)
        msg = yield channel.get()
        msg.received.succeed()
        print("consumed {} at time {}".format(msg.value, env.now))

env = simpy.Environment()
channel = simpy.Store(env, capacity=1)
env.process(writer(env, channel))
env.process(reader(env, channel))
env.run()
Output:
consumed 0 at time 1.2
produced 0 at time 1.2
consumed 1 at time 2.4
produced 1 at time 2.4
If you do the print() before yield msg.received, you’ll get:
produced 0 at time 0.75
consumed 0 at time 1.2
produced 1 at time 1.95
consumed 1 at time 2.4
The alternative would be to write your own resource type.
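For instance, a rendezvous channel built directly on SimPy events could look roughly like this (a minimal sketch; SyncChannel is a made-up name, not a SimPy class):

import simpy

class SyncChannel:
    """Writer and reader block until both have arrived (rendezvous)."""
    def __init__(self, env):
        self.env = env
        self._getters = []  # get events waiting for a value
        self._putters = []  # (put event, value) pairs waiting for a reader

    def put(self, value):
        event = self.env.event()
        if self._getters:
            self._getters.pop(0).succeed(value)  # hand the value to a waiting reader
            event.succeed()                      # the writer may continue immediately
        else:
            self._putters.append((event, value))
        return event

    def get(self):
        event = self.env.event()
        if self._putters:
            put_event, value = self._putters.pop(0)
            event.succeed(value)
            put_event.succeed()                  # release the blocked writer
        else:
            self._getters.append(event)
        return event

With channel = SyncChannel(env), the writer/reader pair from the question produces the paired timestamps you expect (consumed and produced both at 1.2 and at 2.4).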

Related

StopIteration from "yield from"

Newbie to generators here. My understanding is that breaking out of a generator function (total_average) implicitly triggers StopIteration in wrap_average. But wrap_average will return None to the caller, and the program should not see the StopIteration.
def total_average():
    total = 0.0
    count = 0
    avg = None
    print("starting average generator")
    while True:
        num = yield avg
        if num is None:
            break
        total += num
        count += 1
        avg = total / count

def wrap_average(average_generator):
    """This is just a pipe to the generator"""
    print("starting wrap_average")
    avg = yield from average_generator

# Note: total_average() is the generator object; total_average is the generator function.
w = wrap_average(total_average())

# Execute everything until hitting the first yield; None is returned.
print("starting the generator: ", next(w))
print(w.send(3))
print(w.send(4))

# Finish the coroutine.
# ? Not sure why w.send(None) is giving me StopIteration?
w.send(None)
However, Python 3.8 raises a StopIteration error, and I'm not sure why.
The yield from in wrap_average does catch the StopIteration from total_average, but that's not the only StopIteration here, because total_average isn't the only generator.
wrap_average is also a generator. When it ends, it too raises a StopIteration. That's the StopIteration you get from the final send.
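A self-contained sketch of the two StopIterations at work (illustrative inner/outer names; a generator's return value travels on its StopIteration):

def inner():
    yield 1
    return "inner done"              # delivered via inner's StopIteration

def outer():
    result = yield from inner()      # absorbs inner's StopIteration, binds its value
    return result                    # re-raised via outer's own StopIteration

g = outer()
print(next(g))                       # 1
try:
    next(g)                          # inner finishes, then outer finishes
except StopIteration as exc:
    print(exc.value)                 # "inner done"

In the question's code, the final w.send(None) makes total_average break, the yield from completes, and then wrap_average itself ends, raising the StopIteration that was observed.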

Co-routine returns None for every alternate iteration

I have a piece of code as shown below:
#!/bin/python3

import math
import os
import random
import re
import sys
import logging

def consumer():
    while True:
        x = yield
        print(x)

def producer(n):
    for _ in range(n):
        x = int(input())
        yield x

def rooter():
    logging.info("Running the rooter")
    while True:
        value = (yield)
        yield math.sqrt(value)

def squarer():
    logging.info("Running the squarer")
    while True:
        value = (yield)
        print("from squarer: {}".format(value))
        yield value * value

def accumulator():
    logging.info("Running the accumulator.")
    running_total = 0
    while True:
        value = (yield)
        running_total += value
        yield running_total

def pipeline(prod, workers, cons):
    logging.info("workers: {}".format(workers))
    for num in prod:
        for i, w in enumerate(workers):
            num = w.send(num)
        cons.send(num)
    for worker in workers:
        worker.close()
    cons.close()

if __name__ == '__main__':
    order = input().strip()
    m = int(input())
    prod = producer(m)
    cons = consumer()
    next(cons)
    root = rooter()
    next(root)
    accumulate = accumulator()
    next(accumulate)
    square = squarer()
    next(square)
    pipeline(prod, eval(order), cons)
Sample input
[square, accumulate]
3 <- Number of inputs coming further
1 <- actual inputs
2
3
Sample output
The output should be:
1
5
14
but the final value comes to 10 (the sum of the squares of 1 and 3) when it should be 14 (the sum of the squares of 1, 2, and 3).
So essentially the input 2 is missed (it's second in the line of inputs).
On debugging further, I found that this happens on every alternate iteration, not just with the inputs provided here.
I am not able to decipher what's happening. If it's of any help, the coroutine squarer is the one returning None in the second iteration.
I'd appreciate any help.
I found a solution to this: prime the coroutine after each use in the pipeline function. The code becomes as follows; I have marked the added next(w) line with a comment for reference.
#!/bin/python3

import math
import os
import random
import re
import sys
import logging

def consumer():
    while True:
        x = yield
        print(x)

def producer(n):
    for _ in range(n):
        x = int(input())
        yield x

def rooter():
    logging.info("Running the rooter")
    while True:
        value = (yield)
        yield math.sqrt(value)

def squarer():
    logging.info("Running the squarer")
    while True:
        value = (yield)
        print("from squarer: {}".format(value))
        yield value * value

def accumulator():
    logging.info("Running the accumulator.")
    running_total = 0
    while True:
        value = (yield)
        running_total += value
        yield running_total

def pipeline(prod, workers, cons):
    logging.info("workers: {}".format(workers))
    for num in prod:
        for i, w in enumerate(workers):
            num = w.send(num)
            next(w)  # <-- added: advance the worker back to its receiving yield
        cons.send(num)
    for worker in workers:
        worker.close()
    cons.close()

if __name__ == '__main__':
    order = input().strip()
    m = int(input())
    prod = producer(m)
    cons = consumer()
    next(cons)
    root = rooter()
    next(root)
    accumulate = accumulator()
    next(accumulate)
    square = squarer()
    next(square)
    pipeline(prod, eval(order), cons)
As the PEP 342 specification says, the value of a yield expression is None when the generator is resumed by a plain next() call. Each worker here has two yields per loop iteration (the receiving `value = (yield)` and the result `yield`), so after send() collects the result, the extra next(w) advances the worker past its result yield and back to the receiving yield, making it ready to handle the next input immediately.
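A minimal sketch of the underlying behavior (doubler is an illustrative stand-in for the workers above):

def doubler():
    while True:
        value = (yield)      # receiving yield: send() should land here
        yield value * 2      # result yield: delivers the answer

d = doubler()
next(d)             # prime up to the receiving yield
print(d.send(3))    # 6
print(d.send(4))    # None: this send() resumed the result yield, so 4 was dropped
print(d.send(5))    # 10: back in sync, but the 4 was lost

d = doubler()
next(d)
print(d.send(3))    # 6
next(d)             # the fix: step past the result yield after each send
print(d.send(4))    # 8

The dropped 4 in the first run is exactly the "missed input 2" symptom from the question.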

Multiprocessing.apply on Python 3

I have a function that loops over values from a dictionary. I want to split my dict keys so I can break my dict into parts, one per CPU. My function is:
def find_something2(new2, threl=2.0, my_limit=150, far=365):
    """Find stocks that are worth buying."""
    global current_date, total_money, min_date, current_name, dates_dict, mylist, min_date_sell, reduced_stocks
    worthing = list()
    for stock in new2:
        frame = reduced_stocks[stock]
        temp = frame.loc[current_date:end_date]
        if not temp.empty:
            mydate = temp.head(far).Low.idxmin()
            if mydate <= min_date_sell:
                my_min = temp.head(far).Low.min()
                if total_money >= my_min > 0:  # find the min date at four months
                    ans, res, when_sell, total, income = worth_buy(stock, frame, mydate, 'Low',
                                                                  thres=threl, sell_limit=my_limit)
                    if ans:
                        if income > 3 * 10 ** 6:
                            worthing.append([mydate, stock, res, when_sell, total, income])
    if current_date > '1990-01-01':
        return sorted(worthing, key=itemgetter(0))
    elif current_date > '1985-01-01':
        return sorted(worthing, key=itemgetter(0))
    else:
        answer = sorted(worthing, key=itemgetter(5), reverse=True)
        return answer[::11]
What I have tried is:
import multiprocessing as mp

result_list = []

def log_result(result):
    # This is called whenever find_something2 returns a result.
    # result_list is modified only by the main process, not the pool workers.
    global result_list
    result_list.append(result)

def apply_async_with_callback():
    global reduced_stocks
    temp = list(reduced_stocks.keys())
    temp1 = temp[0:1991]
    temp2 = temp[1991:]
    temp = [temp1, temp2]
    pool = mp.Pool(2)
    for i in temp:
        pool.apply_async(find_something2, args=(i, 1.1, 2200, 1), callback=log_result)
    pool.close()
    pool.join()
    print(result_list)

if __name__ == '__main__':
    apply_async_with_callback()
Is this the right way?
I also tried threads, but CPU usage maxes out at 15% even though I am using 12 threads (I have a 6-core Intel CPU):
def pare():
    relist = list(reduced_stocks.keys())
    sublist = [relist[x:x + 332] for x in range(0, len(relist), 332)]
    data = [x for x in sublist]
    threads = list()
    from threading import Thread
    for i in range(12):
        process = Thread(target=find_something2, args=(1.4, 2500, 8, data[i], i, results))
        process.start()
        threads.append(process)
    for process in threads:
        process.join()
One way to do multiprocessing is to create a Pool and pass the prepared data to it, wait for the computations to finish, and process the results. The code below suggests how to do that.
# Set up the function so it gets everything from arguments
def find_something2(new2, threl, my_limit, far, current_date, total_money, min_date_sell, reduced_stocks, end_date):
    # ....
    pass

# Prepare the data:
# replace a1, a2, ... with the actual parameters your function takes
data = [(a1, a2, a3, ...) for your_data in your_dict]

import multiprocessing as mp

with mp.Pool() as pool:
    results = pool.starmap(find_something2, data)
print(results)
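As a concrete toy version of the same pattern (the stock names, chunking, and select_long function here are made up for illustration):

import multiprocessing as mp

def select_long(stocks, threshold):
    # Stand-in for find_something2: everything arrives via arguments,
    # no globals, so the function can run in a worker process.
    return [s for s in stocks if len(s) > threshold]

if __name__ == '__main__':
    keys = ['AAPL', 'MSFT', 'GOOG', 'IBM', 'ORCL', 'INTC']
    chunks = [keys[i:i + 3] for i in range(0, len(keys), 3)]  # split the dict keys
    data = [(chunk, 3) for chunk in chunks]                   # one args tuple per task
    with mp.Pool(2) as pool:
        results = pool.starmap(select_long, data)
    print(results)  # [['AAPL', 'MSFT', 'GOOG'], ['ORCL', 'INTC']]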

Threading with Random wrong result

When using threading with random.shuffle, I get wrong results from a list: some values are missing and others duplicated. I tried NumPy but got the same problem.
#!/usr/bin/python3 -tt

import random
from threading import Thread

class Th(Thread):
    def __init__(self, num, c):
        Thread.__init__(self)
        self.c = c
        self.num = num

    def run(self):
        random.shuffle(self.c)
        for self.i in self.c:
            print(str(self.num) + ' - ' + self.i)

def main():
    with open('file.txt', 'rU') as cmddb:
        c = [cmd.strip() for cmd in cmddb]
    print('ref:')
    for i in c:
        print(i)
    print('----')
    thread_number = 0
    for i in range(0, 5):
        thread = Th(thread_number, c)
        thread.start()
        thread_number += 1

if __name__ == '__main__':
    main()
Result (file.txt contains the lines 10 and 20):
ref:
10
20
----
0 - 20 <========
1 - 10
1 - 20
0 - 20 <========= duplicated / missimg 10
3 - 20 <=
3 - 20 <= again
4 - 10
2 - 10
4 - 20
2 - 20
The problem occurs only sometimes; other times it runs OK.
This happens because the list c is the same list for all threads. So in some cases, one thread prints the first element of c, for example 10, and before it prints the second element, another thread calls shuffle on c. If the order of the values in c has changed, the second element is now 10 again, so it gets printed twice. To avoid this, give every thread its own copy of c:
change thread = Th(thread_number, c) to thread = Th(thread_number, c[:])
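Equivalently, the copy can be taken inside run() instead (a sketch; same Th class, only run() changes):

    def run(self):
        c = self.c[:]        # private copy: other threads cannot reshuffle it mid-loop
        random.shuffle(c)
        for item in c:
            print(str(self.num) + ' - ' + item)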

Send message when failed (Python)

I'm trying to simulate two machines working and failing at random times. When they fail, they call for assistance. These two machines are part of a bigger system of different machines, and each needs to know when its neighbor has failed to do its job.
So far I have simulated the two machines, but I can't figure out how to send messages to their neighbors without each machine needing to know the whole system.
This is what I have so far:
import simpy
import random

random_seed = 42
MTTF = 3500
break_mean = 1 / MTTF
sim_time = 4 * 7 * 24 * 60  # 4 weeks, 24/7
num_machines = 2
rep_time = 30
tpp = 20  # 20 minutes to make each part
neighbour = 3  # How many should it send to?

# Creating a class called messaging.
class messaging(object):
    # Define the initializer: store the environment and a capacity,
    # which defaults to infinity via a SimPy core constant.
    def __init__(self, env, capacity=simpy.core.Infinity):
        self.env = env
        self.capacity = capacity
        self.pipes = []

    # A function named put, which should broadcast a value to every pipe.
    def put(self, value):
        if not self.pipes:  # How to get this error?
            raise RuntimeError('There are no output pipes.')
        # Create a variable, events; store the pipe put events in it.
        events = broken_machine()
        return self.env.all_of(events)

    def get_output_conn(self):
        # Create a pipe with this channel's (infinite) capacity.
        pipe = simpy.Store(self.env, capacity=self.capacity)
        # Append the new pipe to the list of pipes.
        self.pipes.append(pipe)
        return pipe

def mesg_generator(number, env, out_pipe):
    msg = ('Failed')

def message_reciever(name, env, in_pipe):
    while True:
        msg = yield in_pipe.get()
        print("%s received message: %s" % (number, msg[1]))

def time_per_part():
    return tpp

def ttf():
    return random.expovariate(break_mean)

class Machine(object):
    def __init__(self, env, number, repair):
        self.env = env
        self.number = number
        self.parts_made = 0
        self.times_broken = 0
        self.broken = False
        self.process = env.process(self.working(repair))
        env.process(self.broken_machine())

    def working(self, repair):
        while True:
            work = time_per_part()
            while work:
                try:
                    begin = self.env.now
                    yield self.env.timeout(work)
                    work = 0
                except simpy.Interrupt:
                    self.broken = True
                    work -= self.env.now - begin
                    with repair.request(priority=1) as req:
                        yield req
                        yield self.env.timeout(rep_time)
                    self.times_broken += 1
                    yield message_reciever()
                    # print('Machine down')
                    self.broken = False  # Machine fixed again
            self.parts_made += 1

    def broken_machine(self):
        while True:
            yield self.env.timeout(ttf())
            if not self.broken:
                self.process.interrupt()

def other_jobs(env, repair):
    while True:
        work = tpp
        while work:
            with repair.request(priority=2) as req:
                yield req
                try:
                    begin = env.now
                    yield env.timeout(work)
                    work = 0
                except simpy.Interrupt:
                    work -= env.now - begin

print("This simulates machines 3 and 4 doing the same tasks.")
random.seed(random_seed)
env = simpy.Environment()
pipe = simpy.Store(env)
bc_pipe = messaging(env)
repair = simpy.PreemptiveResource(env, capacity=1)
machines = [Machine(env, 'Machine %d' % i, repair)
            for i in range(num_machines)]
env.process(other_jobs(env, repair))
env.run(until=sim_time)

# Show how many times each machine failed:
for machine in machines:
    print("%s broke down %d times" % (machine.number, machine.times_broken))
