I want to create two kinds of Processes, which are basically two patient types: one is Category1 and the other is Category2. Each of them is generated at a specific interval, i.e. the rate of arrival at the Emergency Room, and they compete for resources, which are the doctors. I don't know how to create two different processes. The terminal output is:
4.4910 Patient_C304: Here I am. [0, 0, 0, 0, 0]
4.4910 Patient_C304: Waited 0.000
5.0008 Patient_C304: Finished
Traceback (most recent call last):
File "/home/o/Documents/hos/bank.py", line 153, in <module>
simulate(until=maxTime)
File "/home/o/Documents/hos/hos/lib/python3.8/site-packages/SimPy/Globals.py", line 59, in simulate
return sim.simulate(until = until)
File "/home/o/Documents/hos/hos/lib/python3.8/site-packages/SimPy/Simulation.py", line 551, in simulate
step()
File "/home/o/Documents/hos/hos/lib/python3.8/site-packages/SimPy/Simulation.py", line 495, in step
resultTuple = next(proc._nextpoint)
TypeError: 'NoneType' object is not an iterator
Here's my code:
""" Hospital10: Several doctors with individual queues"""
from SimPy.Simulation import *
from random import expovariate, triangular, seed
# Model components ------------------------
interval_c = [0.02, 0.07, 0.12]
class Source(Process):
""" Source generates Patients randomly"""
def generate_C1(self, number, interval, doctors):
for i in range(number):
p_C1 = PatientC1(name="Patient_C1%02d" % (i,))
activate(p_C1, p_C1.visit(doctors))
t_C1 = expovariate(interval[0])
yield hold, self, t_C1,
def generate_C2(self, number, interval, doctors):
for i in range(number):
p_C2 = PatientC2(name="Patient_C2%02d" % (i,))
activate(p_C2, p_C2.visit(doctors))
t_C2 = expovariate(interval[1])
yield hold, self, t_C2
def generate_C3( self, number, interval, doctors):
for i in range(number):
p_C3= PatientC3(name="Patient_C3%02d" % (i,))
activate(p_C3, p_C3.visit(doctors))
t_C3 = expovariate(interval[2])
yield hold, self, t_C3
def NoInSystem(R):
""" Total number of Patients in the resource R"""
return (len(R.waitQ)+len(R.activeQ))
class PatientC1(Process):
""" Patient arrives, chooses the shortest queue
is served and leaves
"""
def visit(self, doctors):
arrive = now()
Qlength = [NoInSystem(doctors[i]) for i in range(Nd)]
print("%7.4f %s: Here I am. %s" % (now(), self.name, Qlength))
for i in range(Nd):
if Qlength[i] == 0 or Qlength[i] == min(Qlength):
choice = i # the chosen queue number
break
yield request, self, doctors[choice]
wait = now()-arrive
print("%7.4f %s: Waited %6.3f" % (now(), self.name, wait))
tib = triangular(1.0/timeInHospital)
yield hold, self, tib
yield release, self, doctors[choice]
print("%7.4f %s: Finished" % (now(), self.name))
# Experiment data -------------------------
class PatientC2(Process):
""" Patient arrives, chooses the shortest queue
is served and leaves
"""
def visit(self, doctors):
arrive = now()
Qlength = [NoInSystem(doctors[i]) for i in range(Nd)]
print("%7.4f %s: Here I am. %s" % (now(), self.name, Qlength))
for i in range(Nd):
if Qlength[i] == 0 or Qlength[i] == min(Qlength):
choice = i # the chosen queue number
break
yield request, self, doctors[choice]
wait = now()-arrive
print("%7.4f %s: Waited %6.3f" % (now(), self.name, wait))
tib = triangular(1.0/timeInHospital)
yield hold, self, tib
yield release, self, doctors[choice]
print("%7.4f %s: Finished" % (now(), self.name))
class PatientC3(Process):
""" Patient arrives, chooses the shortest queue
is served and leaves
"""
def visit(self, doctors):
arrive = now()
Qlength = [NoInSystem(doctors[i]) for i in range(Nd)]
print("%7.4f %s: Here I am. %s" % (now(), self.name, Qlength))
for i in range(Nd):
if Qlength[i] == 0 or Qlength[i] == min(Qlength):
choice = i # the chosen queue number
break
yield request, self, doctors[choice]
wait = now()-arrive
print("%7.4f %s: Waited %6.3f" % (now(), self.name, wait))
tib = triangular(1.0/timeInHospital)
yield hold, self, tib
yield release, self, doctors[choice]
print("%7.4f %s: Finished" % (now(), self.name))
maxNumber = 5
maxTime = 400.0 # minutes
timeInHospital = 12.0 # mean, minutes
ARRint = 0.02 # mean, minutes
Nd = 5 # number of doctors
theseed = 12345
# Model/Experiment ------------------------------
seed(theseed)
# kk = [Resource(name="Doctor1"), Resource(name="Doctor2")]
kk = [Resource(name="Doctor1"), Resource(name="Doctor2"), Resource(name="Doctor3"), Resource(name="Doctor4"), Resource(name="Doctor5")]
initialize()
s = Source('Source')
activate(s, s.generate_C1(number=maxNumber, interval=interval_c,
doctors=kk), at=0.0)
activate(s, s.generate_C2(number=maxNumber, interval=interval_c,
doctors=kk), at=0.0)
activate(s, s.generate_C3(number=maxNumber, interval=interval_c,
doctors=kk), at=0.0)
simulate(until=maxTime)
This focuses on selecting the shortest queue and generating the visits.
"""
quick sim of patients visiting doctors
programmer Michael R. Gibbs
"""
import simpy
import random
import sys
from typing import Tuple
def get_short(resourceDict) -> Tuple[str, simpy.Resource]:
"""
finds the resource in a dict with the shortest queue + users
returns doc's name, and resource
"""
best_q_len = sys.maxsize
best_r = None
best_k = None
    # iterate through the dict comparing queue lengths
for k,v in resourceDict.items():
q_len = len(v.users) + len(v.queue)
if q_len < best_q_len:
best_q_len = q_len
best_r = v
best_k = k
return best_k, best_r
def visit(env, id, pat_type, doc_dict):
"""
sims a patient meeting with a doc
queue
met
leave
"""
# select doc with shortest queue
doc, r = get_short(doc_dict)
q_len = len(r.users) + len(r.queue)
print(f'{env.now} - patient {id} of type {pat_type} has queued with doc {doc} queue size {q_len}')
with r.request() as req:
# wait in queue
yield req
# visit with doc
print(f'{env.now} - patient {id} of type {pat_type} is with doc {doc}')
yield env.timeout(random.randint(2,10))
# leave
print(f'{env.now} - patient {id} of type {pat_type} is done with doc {doc}')
next_id = 1
def gen_visits(env, pat_type, doc_dict):
"""
gen a stream of patients of pat_type
"""
global next_id
while True:
# wait for next arrival
yield env.timeout(random.randint(1,4))
#start visit
env.process(visit(env, 'pat_' + str(next_id), pat_type, doc_dict))
next_id += 1
# start sim
env = simpy.Environment()
# to create a queue for each doc, make each doc its own resource of cap=1
doc_dict = {'doc_' + str(i): simpy.Resource(env, capacity=1) for i in range(1,4+1)}
# gen different types of patients
env.process(gen_visits(env, "pat_type_A", doc_dict))
env.process(gen_visits(env, "pat_type_B", doc_dict))
env.run(100)
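If the goal is to reproduce the three patient categories from the question with this pattern, one option is to start one generator process per category, each with its own exponential inter-arrival rate. The sketch below is meant to run as a continuation of the script above (it reuses the visit function); the rates are the interval_c values from the question and are purely illustrative:
def gen_typed_visits(env, pat_type, rate, doc_dict):
    """
    gen a stream of patients of one type with exponential inter-arrival times
    """
    i = 0
    while True:
        # mean time between arrivals is 1/rate
        yield env.timeout(random.expovariate(rate))
        i += 1
        env.process(visit(env, pat_type + '_' + str(i), pat_type, doc_dict))

env = simpy.Environment()
doc_dict = {'doc_' + str(i): simpy.Resource(env, capacity=1) for i in range(1, 5+1)}
# one generator per patient category, rates taken from the question's interval_c
for pat_type, rate in [('C1', 0.02), ('C2', 0.07), ('C3', 0.12)]:
    env.process(gen_typed_visits(env, pat_type, rate, doc_dict))
env.run(100)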
Related
I'm trying to create a Parking Lot in OOP. I want to check if a key is already in my dictionary; for example, I don't want the same "plate number" in my dict twice.
I'm using the command:
if plate in p1.carsAndEnterTime:
print("This plate number already exists in the system")
plate = input("Please enter the plate number:\n")
But it doesn't find the key.
This is my full code:
class Cars:
def __init__(self, phone, car_type, plate):
self.__phone = phone
self.__car_type = car_type
self.__plate = plate
def __repr__(self):
return f"{self.__plate}, {self.__phone}, {self.__car_type}"
def __str__(self):
return f"{self.__plate}, {self.__phone}, {self.__car_type}"
class ParkingLot:
def __init__(self, name, capacity=1):
''' return a ParkingLot object with name "name" '''
self.name = name
self.capacity = capacity
self.earnings = 0
self.rate = 15
self.carsAndEnterTime = {}
def SetCapacity(self, newCap):
''' change the capacity from the default 1 '''
if newCap < 1:
raise RuntimeError("Error: parking lot size cannot be less than 1")
self.capacity = newCap
def GetCapacity(self):
''' return parking lot capacity '''
return self.capacity
def GetEarnings(self):
''' return how much much parking has made '''
return self.earnings
def VehicleEnters(self, vehicle):
''' vehicle enters parking lot'''
# put car and its enter time in a dictionary
self.carsAndEnterTime[vehicle] = datetime.datetime.now()
if self.capacity == 0:
raise RuntimeError("Error: Parking lot full!")
self.capacity -= 1
def SetSecondlyRate(self, rate=20):
self.rate = rate
def VehicleLeaves(self, vehicle):
''' vehicle leaves parking lot. when it leaves, charges money '''
secondsDiff = datetime.datetime.now() - self.carsAndEnterTime[vehicle]
hour_roundup = math.ceil(secondsDiff.seconds / 3600)
self.earnings += self.rate * hour_roundup
# after earned money, delete vehicle from dictionary
del self.carsAndEnterTime[vehicle]
self.capacity += 1
When I do the following:
>>> p1 = ParkingLot(p1,2)
>>> plate = 12345
>>> car_type = "Public"
>>> phone = "05555555"
>>> c = c1 = Cars(plate, car_type, phone)
When I try to check whether the plate is inside the dict, the check is ignored, although the plate exists in the dict:
if plate in p1.carsAndEnterTime:
print("This plate number already exists in the system")
plate = input("Please enter the plate number:\n")
For example, when I print my dict you can see that 12345 appears twice in it:
>>>print(p1.carsAndEnterTime)
{12345, 55555, p: datetime.datetime(2020, 5, 10, 23, 0, 36, 557859), 12345, 5555, p: datetime.datetime(2020, 5, 10, 23, 0, 44, 568150)}
What am I doing wrong and how do I fix it?
You do not put the cars into your parking lot by their plate; you put full car instances into your dictionary as keys. If you put in two different car instances with the same values, id(car1) and id(car2) are still different because they are two different objects, so there is no equality.
A crude way around that would be to check:
def CarAlreadyParked(self, vehicle):
"""Returns true if the exact car is already parked."""
return any(str(c) == str(vehicle) for c in self.carsAndEnterTime)
before letting the car in (it could be a forged-identity car). All in all, it would be better to redesign and store the cars by their plate in a dict:
dict[PlateNr] = (VehicleInstance, CheckinTime)
so you can quickly find them by plate number and keep the vehicle object together with its check-in time in the value.
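A minimal sketch of that redesign (the method names follow the original class, the dict layout is the one suggested above, and the other details are illustrative):
import datetime
import math

class ParkingLot:
    def __init__(self, name, capacity=1):
        self.name = name
        self.capacity = capacity
        self.earnings = 0
        self.rate = 15
        self.carsAndEnterTime = {}   # plate -> (Cars instance, enter time)

    def VehicleEnters(self, vehicle, plate):
        ''' reject duplicates by plate, then park the car '''
        if plate in self.carsAndEnterTime:
            raise RuntimeError("This plate number already exists in the system")
        if self.capacity == 0:
            raise RuntimeError("Error: Parking lot full!")
        self.carsAndEnterTime[plate] = (vehicle, datetime.datetime.now())
        self.capacity -= 1

    def VehicleLeaves(self, plate):
        ''' charge by rounded-up hours and free the spot '''
        vehicle, entered = self.carsAndEnterTime.pop(plate)
        hours = math.ceil((datetime.datetime.now() - entered).total_seconds() / 3600)
        self.earnings += self.rate * hours
        self.capacity += 1
With that layout, the check if plate in p1.carsAndEnterTime works as intended, because the keys are the plate numbers themselves.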
I have a function that loops over values from a dictionary. I want to split my dict keys into parts equal to the number of my CPUs. My function is:
def find_something2(new2, threl=2.0, my_limit=150, far=365):
""" Find stocks tha are worth buying"""
global current_date, total_money, min_date, current_name, dates_dict, mylist, min_date_sell, reduced_stocks
worthing = list()
for stock in new2:
frame = reduced_stocks[stock]
temp = frame.loc[current_date:end_date]
if not temp.empty:
mydate = temp.head(far).Low.idxmin()
if mydate <= min_date_sell:
my_min = temp.head(far).Low.min()
if total_money >= my_min > 0: # find the min date at four months
ans, res, when_sell, total, income = worth_buy(stock, frame, mydate, 'Low',
thres=threl, sell_limit=my_limit)
if ans:
if income > 3 * 10 ** 6:
worthing.append([mydate, stock, res, when_sell, total, income])
if current_date > '1990-01-01':
return sorted(worthing, key=itemgetter(0))
elif current_date > '1985-01-01':
return sorted(worthing, key=itemgetter(0))
else:
answer = sorted(worthing, key=itemgetter(5), reverse=True)
return answer[::11]
What I have tried is:
import multiprocessing as mp
result_list = []
def log_result(result):
# This is called whenever foo_pool(i) returns a result.
# result_list is modified only by the main process, not the pool workers.
global result_list
result_list.append(result)
def apply_async_with_callback():
global reduced_stocks
temp = list(reduced_stocks.keys())
temp1 = temp[0:1991]
temp2 = temp[1991:]
temp = [temp1, temp2]
pool = mp.Pool(2)
for i in temp:
pool.apply_async(find_something2, args=(i, 1.1, 2200, 1,), callback=log_result)
pool.close()
pool.join()
print(result_list)
if __name__ == '__main__':
apply_async_with_callback()
Is this the right way?
I also tried threads, but CPU usage maxes out at 15% although I am using 12 threads (I have a 6-core Intel CPU):
def pare():
relist = list(reduced_stocks.keys())
sublist = [relist[x:x+332] for x in range(0, len(relist), 332)]
data = [x for x in sublist]
threads = list()
from threading import Thread
for i in range(12):
process = Thread(target=find_something2, args=(1.4,2500,8,data[i],i,results))
process.start()
threads.append(process)
for process in threads:
process.join()
One way to do multiprocessing is to create a Pool and pass the prepared data to it, wait for the computation to finish, and then process the results. The code below suggests how to do that.
# setup the function so it gets everything from arguments
def find_something2(new2, threl, my_limit, far, current_date, total_money, min_date_sell, reduced_stocks, end_date):
# ....
pass
# prepare the data
# replace the a1, a2 ... with the actual parameters your function takes
data = [(a1, a2, a3, ...) for your_data in your_dict]
import multiprocessing as mp
with mp.Pool() as pool:
results = pool.starmap(find_something2, data)
print(results)
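Applied to the question's goal of splitting the dict keys into one chunk per CPU, a sketch could look like the following. Here find_something2 and reduced_stocks stand for the question's own function and dict, the numeric arguments are the ones the question already passes, and it is assumed that the module-level globals find_something2 reads are set up before the Pool is created (as in the question):
import multiprocessing as mp

def chunk(seq, n):
    """Split seq into at most n roughly equal chunks."""
    size = -(-len(seq) // n)  # ceiling division
    return [seq[i:i + size] for i in range(0, len(seq), size)]

if __name__ == '__main__':
    keys = list(reduced_stocks.keys())
    n_procs = mp.cpu_count()
    # one argument tuple per chunk of keys
    data = [(part, 1.1, 2200, 1) for part in chunk(keys, n_procs)]
    with mp.Pool(n_procs) as pool:
        results = pool.starmap(find_something2, data)
    print(results)  # one return value per chunk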
I'm a student working on a small assignment where I need to collect inputs from a student on factors like the kind of books they like to borrow from the library. I've been provided an id_tree class which I need to search with. As you can see, I'm getting inputs from the console, and I would like to use those as the search criteria and get the recommendation from the id tree.
Just for testing purposes, I'm using out.py, but that needs to be replaced with the id_tree search logic, which is what I'm struggling with.
# k-Nearest Neighbors and Identification Trees
#api.py
import os
from copy import deepcopy
from functools import reduce
################################################################################
############################# IDENTIFICATION TREES #############################
################################################################################
class Classifier :
def __init__(self, name, classify_fn) :
self.name = str(name)
self._classify_fn = classify_fn
def classify(self, point):
try:
return self._classify_fn(point)
except KeyError as key:
raise ClassifierError("point has no attribute " + str(key) + ": " + str(point))
def copy(self):
return deepcopy(self)
def __eq__(self, other):
try:
return (self.name == other.name
and self._classify_fn.__code__.co_code == other._classify_fn.__code__.co_code)
except:
return False
def __str__(self):
return "Classifier<" + str(self.name) + ">"
__repr__ = __str__
## HELPER FUNCTIONS FOR CREATING CLASSIFIERS
def maybe_number(x) :
try :
return float(x)
except (ValueError, TypeError) :
return x
def feature_test(key) :
return Classifier(key, lambda pt : maybe_number(pt[key]))
def threshold_test(feature, threshold) :
return Classifier(feature + " > " + str(threshold),
lambda pt: "Yes" if (maybe_number(pt.get(feature)) > threshold) else "No")
## CUSTOM ERROR CLASSES
class NoGoodClassifiersError(ValueError):
def __init__(self, value=""):
self.value = value
def __str__(self):
return repr(self.value)
class ClassifierError(RuntimeError):
def __init__(self, value=""):
self.value = value
def __str__(self):
return repr(self.value)
class IdentificationTreeNode:
def __init__(self, target_classifier, parent_branch_name=None):
self.target_classifier = target_classifier
self._parent_branch_name = parent_branch_name
self._classification = None #value, if leaf node
self._classifier = None #Classifier, if tree continues
self._children = {} #dict mapping feature to node, if tree continues
self._data = [] #only used temporarily for printing with data
def get_parent_branch_name(self):
return self._parent_branch_name if self._parent_branch_name else "(Root node: no parent branch)"
def is_leaf(self):
return not self._classifier
def set_node_classification(self, classification):
self._classification = classification
if self._classifier:
print("Warning: Setting the classification", classification, "converts this node from a subtree to a leaf, overwriting its previous classifier:", self._classifier)
self._classifier = None
self._children = {}
return self
def get_node_classification(self):
return self._classification
def set_classifier_and_expand(self, classifier, features):
if classifier is None:
raise TypeError("Cannot set classifier to None")
if not isinstance_Classifier(classifier):
raise TypeError("classifier must be Classifier-type object: " + str(classifier))
self._classifier = classifier
try:
self._children = {feature:IdentificationTreeNode(self.target_classifier, parent_branch_name=str(feature))
for feature in features}
except TypeError:
raise TypeError("Expected list of feature names, got: " + str(features))
if len(self._children) == 1:
print("Warning: The classifier", classifier.name, "has only one relevant feature, which means it's not a useful test!")
if self._classification:
print("Warning: Setting the classifier", classifier.name, "converts this node from a leaf to a subtree, overwriting its previous classification:", self._classification)
self._classification = None
return self
def get_classifier(self):
return self._classifier
def apply_classifier(self, point):
if self._classifier is None:
raise ClassifierError("Cannot apply classifier at leaf node")
return self._children[self._classifier.classify(point)]
def get_branches(self):
return self._children
def copy(self):
return deepcopy(self)
def print_with_data(self, data):
tree = self.copy()
tree._assign_data(data)
print(tree.__str__(with_data=True))
def _assign_data(self, data):
if not self._classifier:
self._data = deepcopy(data)
return self
try:
pairs = list(self._soc(data, self._classifier).items())
except KeyError: #one of the points is missing a feature
raise ClassifierError("One or more points cannot be classified by " + str(self._classifier))
for (feature, branch_data) in pairs:
if feature in self._children:
self._children[feature]._assign_data(branch_data)
else: #feature branch doesn't exist
self._data.extend(branch_data)
return self
_ssc=lambda self,c,d:self.set_classifier_and_expand(c,self._soc(d,c))
_soc=lambda self,d,c:reduce(lambda b,p:b.__setitem__(c.classify(p),b.get(c.classify(p),[])+[p]) or b,d,{})
def __eq__(self, other):
try:
return (self.target_classifier == other.target_classifier
and self._parent_branch_name == other._parent_branch_name
and self._classification == other._classification
and self._classifier == other._classifier
and self._children == other._children
and self._data == other._data)
except:
return False
def __str__(self, indent=0, with_data=False):
newline = os.linesep
ret = ''
if indent == 0:
ret += (newline + "IdentificationTreeNode classifying by "
+ self.target_classifier.name + ":" + newline)
ret += " "*indent + (self._parent_branch_name + ": " if self._parent_branch_name else '')
if self._classifier:
ret += self._classifier.name
if with_data and self._data:
ret += self._render_points()
for (feature, node) in sorted(self._children.items()):
ret += newline + node.__str__(indent+1, with_data)
else: #leaf
ret += str(self._classification)
if with_data and self._data:
ret += self._render_points()
return ret
def _render_points(self):
ret = ' ('
first_point = True
for point in self._data:
if first_point:
first_point = False
else:
ret += ', '
ret += str(point.get("name","datapoint")) + ": "
try:
ret += str(self.target_classifier.classify(point))
except ClassifierError:
ret += '(unknown)'
ret += ')'
return ret
################################################################################
############################# k-NEAREST NEIGHBORS ##############################
################################################################################
class Point(object):
"""A Point has a name and a list or tuple of coordinates, and optionally a
classification, and/or alpha value."""
def __init__(self, coords, classification=None, name=None):
self.name = name
self.coords = coords
self.classification = classification
def copy(self):
return deepcopy(self)
def __getitem__(self, i): # make Point iterable
return self.coords[i]
def __eq__(self, other):
try:
return (self.coords == other.coords
and self.classification == other.classification)
except:
return False
def __str__(self):
ret = "Point(" + str(self.coords)
if self.classification:
ret += ", " + str(self.classification)
if self.name:
ret += ", name=" + str(self.name)
ret += ")"
return ret
__repr__ = __str__
################################################################################
############################### OTHER FUNCTIONS ################################
################################################################################
def is_class_instance(obj, class_name):
return hasattr(obj, '__class__') and obj.__class__.__name__ == class_name
def isinstance_Classifier(obj):
return is_class_instance(obj, 'Classifier')
def isinstance_IdentificationTreeNode(obj):
return is_class_instance(obj, 'IdentificationTreeNode')
def isinstance_Point(obj):
return is_class_instance(obj, 'Point')
#id_tree
from api import *
import math
log2 = lambda x: math.log(x, 2)
INF = float('inf')
import pandas as pd
import csv  # needed below for csv.DictWriter
def id_tree_classify_point(point, id_tree):
if id_tree.is_leaf():
return id_tree.get_node_classification()
else:
new_tree = id_tree.apply_classifier(point)
get_point = id_tree_classify_point(point, new_tree)
return get_point
def split_on_classifier(data, classifier):
"""Given a set of data (as a list of points) and a Classifier object, uses
the classifier to partition the data. Returns a dict mapping each feature
values to a list of points that have that value."""
#Dictionary which will contain the data after classification.
class_dict = {}
#Iterating through all the points in data
for i in range(len(data)):
get_value = classifier.classify(data[i])
if get_value not in class_dict:
class_dict[get_value] = [data[i]]
else:
class_dict[get_value].append(data[i])
return class_dict
def branch_disorder(data, target_classifier):
"""Given a list of points representing a single branch and a Classifier
for determining the true classification of each point, computes and returns
the disorder of the branch."""
#Getting data after classification based on the target_classifier
class_dict = split_on_classifier(data, target_classifier)
if (len(class_dict) == 1):
#Homogenous condition
return 0
else:
disorder = 0
for i in class_dict:
get_len = len(class_dict[i])
p_term = get_len/ float(len(data))
disorder += (-1) * p_term * log2(p_term)
return disorder
def average_test_disorder(data, test_classifier, target_classifier):
"""Given a list of points, a feature-test Classifier, and a Classifier
for determining the true classification of each point, computes and returns
the disorder of the feature-test stump."""
average_disorder = 0.0
#Getting all the branches after applying test_classifer
get_branches = split_on_classifier(data, test_classifier)
#Iterating through the branches
for i in get_branches:
disorder = branch_disorder(get_branches[i], target_classifier)
average_disorder += disorder * (len(get_branches[i])/ float(len(data)))
return average_disorder
#### CONSTRUCTING AN ID TREE
def find_best_classifier(data, possible_classifiers, target_classifier):
"""Given a list of points, a list of possible Classifiers to use as tests,
and a Classifier for determining the true classification of each point,
finds and returns the classifier with the lowest disorder. Breaks ties by
preferring classifiers that appear earlier in the list. If the best
classifier has only one branch, raises NoGoodClassifiersError."""
#Base values to start with
best_classifier = average_test_disorder(data, possible_classifiers[0], target_classifier)
store_classifier = possible_classifiers[0]
#Iterating over the list of possible classifiers
for i in range(len(possible_classifiers)):
avg_disorder = average_test_disorder(data, possible_classifiers[i], target_classifier)
if avg_disorder < best_classifier:
best_classifier = avg_disorder
store_classifier = possible_classifiers[i]
get_branches = split_on_classifier(data, store_classifier)
if len(get_branches)==1:
#Only 1 branch present
raise NoGoodClassifiersError
else:
return store_classifier
def construct_greedy_id_tree(data, possible_classifiers, target_classifier, id_tree_node=None):
"""Given a list of points, a list of possible Classifiers to use as tests,
a Classifier for determining the true classification of each point, and
optionally a partially completed ID tree, returns a completed ID tree by
adding classifiers and classifications until either perfect classification
has been achieved, or there are no good classifiers left."""
#print data
#print "possible", possible_classifiers
#print "target", target_classifier
if id_tree_node == None:
#Creating a new tree
id_tree_node = IdentificationTreeNode(target_classifier)
if branch_disorder(data, target_classifier) == 0:
id_tree_node.set_node_classification(target_classifier.classify(data[0]))
else:
try:
#Getting the best classifier from the options available
best_classifier = find_best_classifier(data, possible_classifiers, target_classifier)
get_branches = split_on_classifier(data, best_classifier)
id_tree_node = id_tree_node.set_classifier_and_expand(best_classifier, get_branches)
#possible_classifiers.remove(best_classifier)
branches = id_tree_node.get_branches()
for i in branches:
construct_greedy_id_tree(get_branches[i], possible_classifiers, target_classifier, branches[i])
except NoGoodClassifiersError:
pass
return id_tree_node
possible_classifiers = [feature_test('age'),
feature_test('gender'),
feature_test('duration'),
feature_test('Mood')
]
df1 = pd.read_csv("data_form.csv")
#df1 = df1.drop("age", axis=1)
print(df1)
a = []
with open("data_form.csv") as myfile:
firstline = True
for line in myfile:
if firstline:
mykeys = "".join(line.split()).split(',')
firstline = False
else:
values = "".join(line.split()).split(',')
a.append({mykeys[n]:values[n] for n in range(0,len(mykeys))})
keys = a[0].keys()
print(keys)
with open('data_clean.csv', 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(a)
print(a)
tar = feature_test('genre')
print(construct_greedy_id_tree(a, possible_classifiers, tar))
#book_suggestion
import random
#from out import *
def genre(Mood, age, gender, duration):
print("Hi")
res_0= input("What's your name?")
res_1 = input("How are you, "+str(res_0)+"?")
if res_1 in ("good","fine","ok","nice"):
print ("Oh nice")
else:
print("Oh! It's alright")
Mood = input("What is your current mood?")
age = input("What is your age range : 10-12, 12-15,13-14,15-18,18+?")
gender = input("What is your gender?")
duration = input("How long do you want to read : 1week, 2weeks, 3weeks, 3+weeks, 2hours")
def get_book(genre):
suggestions = []
genre_to_book = {"Fantasy":["Just me and my babysitter - Mercer Mayer","Just Grandpa and me - Mercer Mayer","Just me and my babysitter - Mercer Mayer",
"The new Potty - Mercer Mayer","I was so mad - Mercer Mayer","Just me and my puppy" ,"Just a mess" ,"Me too"
,"The new Baby","Just shopping with mom"],
"Encyclopedias":["Brain Power - Paul Mcevoy", "My best books of snakes Gunzi Chrisitian","MY best books of MOON Grahame,Ian",
"The book of Planets Twist,Clint", "Do stars have points? Melvin", "Young discover series:cells Discovery Channel"]
,
"Action" : ["The Kane Chronicle:The Throne of Fire s Book 2 Riordan,Rick",
"Zane : ninja of ice Farshtey, Greg",
"Escape from Sentai Mountain Farshtey, Greg",
"Percy jackson Rick Riordan",
"The Kane Chronicle:The Throne of Fire s Book 2 Rick Riordan"],
"Comic" : ["Double Dork Diaries Russell Rachel Renée",
"Dork Dairies Russell Rachel Renee",
"Dork Dairies Russell Rachel Renée"],
"Mystery" : ["Sparkling Cyanide Christie Agatha",
"Poirot's Early Cases: Agatha Christie",
"The Name of this Book is Secret Bosch,Pseudonyuous"],
"Biographies" :["All by myself Mercer Mayer", "D Days prett bryan",
"Snake Bite Lane Andrew"] }
if (genre == "Fantasy"):
suggestions = [random.sample(genre_to_book["Fantasy"], 3)]
elif (genre == "Action"):
suggestions = [random.sample(genre_to_book["Action"], 3)]
elif (genre == "Comic"):
suggestions = [random.sample(genre_to_book["Comic"], 3)]
elif (genre == "Mystery"):
suggestions = [random.sample(genre_to_book["Mystery"], 3)]
elif (genre == "Encyclopedias"):
suggestions = random.sample(genre_to_book["Encyclopedias"], 3)
elif (genre == "Biographies"):
suggestions = random.sample(genre_to_book["Biographies"], 3)
return suggestions
print(get_book(genre(Mood, age, gender, duration)))
I want the program to not depend on out.py and to run on the information from the id tree.
The current implementation of the suggestions works by asking the user for a genre, looking up a list of book titles in a dictionary with that genre as the key, then randomly sampling a few of the titles and printing them. The current implementation also (presumably) constructs an IdentificationTreeNode containing recommendations, but then does nothing with it except print it to standard output.
The next step would be to not discard the tree, but to save it in a variable and use it in the recommendation process. Since the class structure is not given, it is not clear exactly how this would be done, but it seems a reasonable assumption that it is possible to provide a keyword (the genre) and receive some collection of objects, each containing data on a recommendation.
If constructing the IdentificationTreeNode is too costly to run on each recommendation request, it is possible to split the construction into its own script file and use Python's pickle package to save the object to a file that can then be unpickled more quickly by the script performing the recommendations.
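A minimal sketch of that idea, built on the functions already defined in the question (id_tree_classify_point, construct_greedy_id_tree, get_book), assuming data_form.csv really does contain the columns Mood, age, gender, duration and genre used above; note that apply_classifier raises a KeyError if a console answer never appeared in the training data, so real input would need some validation:
def recommend(tree, mood, age, gender, duration):
    # build a point shaped like the training rows and classify it with the id tree
    point = {"Mood": mood, "age": age, "gender": gender, "duration": duration}
    predicted_genre = id_tree_classify_point(point, tree)
    return get_book(predicted_genre)

tree = construct_greedy_id_tree(a, possible_classifiers, tar)
print(recommend(tree,
                input("What is your current mood?"),
                input("What is your age range : 10-12, 12-15, 13-14, 15-18, 18+?"),
                input("What is your gender?"),
                input("How long do you want to read : 1week, 2weeks, 3weeks, 3+weeks, 2hours?")))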
I'm trying to simulate two machines working and failing at random times. When they fail, they call for assistance. These two machines are part of a bigger system of different machines, each of which needs to know when its neighbour has failed to do its job.
So far I have simulated the two machines, but I can't figure out how to send messages to their neighbours without each machine needing to know the whole system.
This is what I have so far:
import simpy
import random
random_seed=42
MTTF = 3500
break_mean = 1 / MTTF
sim_time = 4 * 7*24*60 # 4 weeks 24/7
num_machines = 2
rep_time = 30
tpp = 20 #20 minutes to make each part
neighbour = 3 #How many should it send to?
#Creating a class called messaging which is an object.
class messaging(object):
#DEfing the initilizing function, and initilize self, Environment, and capacity which is set to infinity, by a simpy core-function.
def __init__(self, env, capacity=simpy.core.Infinity):
self.env = env
self.capacity = capacity
self.pipes = []
#Making a function which work on everything that is a part of the message. With name Put.
def put(self, value):
if not self.pipes: #How to get this error?
raise runtime_error('There are no output pipes.')
#Create a variable, events, store to it pipe values
events = broken_machine()
return self.env.all_of(events)
def get_output_conn(self):
#Set the capacity of pipe variable to store infinity.
pipe = simpy.Store(self.env, capacity=self.capacity)
#to each pipes, add(or append) pipe
self.pipes.append(pipe)
return pipe
def mesg_generator(number, env, out_pipe):
msg = ('Failed')
def message_reciever(name, env, in_pipe):
while True:
msg = yield in_pipe.get()
print("%s received message: %s" % (number, msg[1]))
def time_per_part():
return tpp
def ttf():
return random.expovariate(break_mean)
class Machine(object):
def __init__(self, env, number, repair):
#self.arg = arg
self.env = env
self.number = number
self.parts_made = 0
self.times_broken = 0
self.broken = False
self.process = env.process(self.working(repair))
env.process(self.broken_machine())
def working(self, repair):
while True:
work = time_per_part()
while work:
try:
begin = self.env.now
yield self.env.timeout(work)
work = 0
except simpy.Interrupt:
self.broken = True
work -= self.env.now - begin
with repair.request(priority = 1) as req:
yield req
yield self.env.timeout(rep_time)
self.times_broken +=1
yield message_reciever()
#print('Machine down')
self.broken = False #Machine fixed again
self.parts_made +=1
def broken_machine(self):
while True:
yield self.env.timeout(ttf())
if not self.broken:
self.process.interrupt()
def other_jobs(env, repair):
while True:
work = tpp
while work:
with repair.request(priority=2) as req:
yield req
try:
begin = env.now
yield env.timeout(work)
work = 0
except simpy.Interrupt:
work -= env.now - begin
print("This simulates machines 3 and 4 doing the same tasks.")
random.seed(random_seed)
env = simpy.Environment()
pipe = simpy.Store(env)
bc_pipe = messaging(env)
repair = simpy.PreemptiveResource(env, capacity = 1)
machines = [Machine(env, 'Machine %d' % i, repair)
for i in range(num_machines)]
env.process(other_jobs(env, repair))
env.run(until=sim_time)
#Show how many times each machine failed:
for machine in machines:
print("%s broke down %d times" %(machine.number, machine.times_broken))
At the moment I'm trying to handle results from a calculation which come in very fast. At first I inserted each simulation result into an SQLite database, but it turned out to be the bottleneck of the entire calculation. So I ended up using cursor.executemany instead of cursor.execute, which is much faster.
My problem now is that I'm somehow not able to implement a thread-safe counter.
The executemany task should be run every 1000 calculations. Therefore I implemented an initializer with a multiprocessing.Value. I also tried this solution (http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing), but somehow some values of the counter are duplicated, which ends up running the executemany task too often or not at all.
If anybody has an idea how to solve this issue, I'd really appreciate it.
Here's a minimum sample:
import multiprocessing, sqlite3
from multiprocessing import Value, Lock
from itertools import repeat
def worker(Testvalues, TotalValues):
MP_counter.value += 1
counter.increment()
con = sqlite3.connect("Test.db", timeout=30.0)
cur = con.cursor()
# Minimum sample:
Helper = list(range(5))
Helper = [x * Testvalues for x in Helper]
GList.append(Helper)
Execute_Every = 10
print("Counter class: %d" % (counter.value()))
print("MP_counter: %d" % (MP_counter.value))
if counter.value() % Execute_Every == 0 or counter.value() == TotalValues - 1:
print("Execute query")
print("Counter class: %d" % (counter.value()))
print("MP_counter: %d" % (MP_counter.value))
Helper = [tuple(row) for row in GList[:Execute_Every]]
del GList[:Execute_Every]
cur.executemany(
"INSERT INTO Test (One, Two, Three, Four, Five) VALUES (?, ?, ?, ?, ?);", Helper)
con.commit()
con.close()
def setup(t, g, c):
global MP_counter
global GList
global counter
MP_counter = t
GList = g
counter = c
class Counter(object):
def __init__(self, initval=0):
self.val = Value('i', initval)
self.lock = Lock()
def increment(self):
with self.lock:
self.val.value += 1
def value(self):
with self.lock:
return self.val.value
if __name__ == '__main__':
m = multiprocessing.Manager()
CPUS = multiprocessing.cpu_count()
MP_counter = multiprocessing.Value('i', 0)
GList = m.list([])
thread_safe_counter = Counter(0)
l = multiprocessing.Lock()
WORKERS = multiprocessing.Pool(initializer=setup, initargs=[MP_counter, GList, thread_safe_counter],processes=CPUS)
con = sqlite3.connect("Test.db", timeout=30.0)
cur = con.cursor()
cur.execute('PRAGMA journal_mode=wal')
SQLCommand = "CREATE TABLE IF NOT EXISTS Test (One INT, Two INT, Three INT, Four INT, Five INT);"
cur.execute(SQLCommand)
con.close()
TotalValues = 100
Testvalues = list(range(TotalValues))
WORKERS.starmap(worker, zip(Testvalues, repeat(TotalValues)))
WORKERS.close()
WORKERS.join()
#Check if list is empty
print(GList)
Thank you guys :)
Your counter has an increment() and a value() method which are called separately, so to make the check safe you'd have to hold the lock across both operations. Instead, your increment() method should return the new value after incrementing it, and you should use that return value without any further calls to value(), e.g.:
class Counter(object):
def __init__(self, initval=0):
self.val = Value('i', initval)
self.lock = Lock()
def increment(self):
with self.lock:
self.val.value += 1
return self.val.value
...
def worker(Testvalues, TotalValues):
counter_value = counter.increment()
# use only counter_value from here on
...
Also, a Value is already created with a default RLock, which can be overridden in the constructor call with a different lock type if needed. So you don't really need to allocate your own lock, you could just use:
class Counter(object):
def __init__(self, initval=0):
self.val = Value('i', initval)
# or Value('i', initval, lock=Lock())
def increment(self):
with self.val.get_lock():
self.val.value += 1
return self.val.value
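For a quick sanity check of that version, here is a standalone sketch (without the question's database code) that shares one Counter across a Pool through the initializer, exactly as the original setup function does; every worker should then see a unique, gap-free sequence of counter values:
from multiprocessing import Pool, Value

class Counter(object):
    def __init__(self, initval=0):
        self.val = Value('i', initval)

    def increment(self):
        # atomic read-modify-write under the Value's built-in lock
        with self.val.get_lock():
            self.val.value += 1
            return self.val.value

def init_worker(shared_counter):
    global counter
    counter = shared_counter

def work(_):
    return counter.increment()

if __name__ == '__main__':
    counter = Counter()
    with Pool(4, initializer=init_worker, initargs=(counter,)) as pool:
        print(sorted(pool.map(work, range(20))))  # expect 1..20 with no duplicates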