Pygal bar chart says “No data” - python-3.x

I am trying to create a bar chart in pygal that uses the Hacker News API and charts the most active stories based on comments. I posted my code below, but I cannot figure out why my graph keeps saying "No data". Any suggestions? Thanks!
import requests
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
from operator import itemgetter

# Make an API call, and store the response.
url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
r = requests.get(url)
print("Status code:", r.status_code)

# Process information about each submission.
submission_ids = r.json()
submission_dicts = []
for submission_id in submission_ids[:30]:
    # Make a separate API call for each submission.
    url = ('https://hacker-news.firebaseio.com/v0/item/' +
           str(submission_id) + '.json')
    submission_r = requests.get(url)
    print(submission_r.status_code)
    response_dict = submission_r.json()
    submission_dict = {
        'comments': int(response_dict.get('descendants', 0)),
        'title': response_dict['title'],
        'link': 'http://news.ycombinator.com/item?id=' + str(submission_id),
    }
    submission_dicts.append(submission_dict)
# Visualization
my_style = LS('#336699', base_style=LCS)
my_config = pygal.Config()
my_config.show_legend = False
my_config.title_font_size = 24
my_config.label_font_size = 14
my_config.major_label_font_size = 18
my_config.show_y_guides = False
my_config.width = 1000
chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Most Active News on Hacker News'
chart.add('', submission_dicts)
chart.render_to_file('hn_submissons_repos.svg')

The values in the list passed to the add function need to be either numbers or dicts that contain the key 'value' (or a mixture of the two). The simplest solution would be to change the keys used when creating submission_dict:
submission_dict = {
    'value': int(response_dict.get('descendants', 0)),
    'label': response_dict['title'],
    'xlink': 'http://news.ycombinator.com/item?id=' + str(submission_id),
}
Notice that link has become xlink; this is one of the optional parameters defined in the Value Configuration section of the pygal docs.
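With the keys renamed, the list can be passed to add() as-is. As a small sketch of the finished visualization step, the submissions could also be sorted from most to fewest comments first; the sort is my assumption (the question's unused itemgetter import suggests that was the intent):
# Sort by comment count, most active first (assumed intent of the
# itemgetter import in the question).
submission_dicts.sort(key=itemgetter('value'), reverse=True)
chart.add('', submission_dicts)
chart.render_to_file('hn_submissons_repos.svg')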

Related

Cognitive Services - How to add multiple Faces from a Live Stream to an Azure FaceList? (Python)

Problem Background:
I have created an Azure FaceList and I am using my webcam to capture a live feed, where I am:
sending the stream to Azure Face Detect,
getting the Face Rectangle returned by Face Detect, and
using the returned Face Rectangle to add the face detected in the live video stream to my FaceList.
(I need to create the Face List in order to solve the problem I explained in my other question, which was answered by Nicolas, and which is what I am following.)
Problem Details:
According to the Azure FaceList documentation at https://learn.microsoft.com/en-us/rest/api/cognitiveservices/face/facelist/addfacefromstream, if there are multiple faces in the image, we need to specify the target face to add to the Azure FaceList.
The problem is: what if we need to add all of the detected faces (multiple faces) to the Face List? Suppose there are 2 or more faces in a single frame of video; how can I add both of those faces to the Face List?
I have tried adding the face rectangles returned from Azure Face Detect to a Python list and then iterating over the list indexes, so that each face rectangle can be passed to the Azure FaceList one by one. But to no avail. I am still getting the error:
There are more than one faces in the image
My Code:
face_list_id = "newtest-face-list"
vid = cv2.VideoCapture(0)
count = 0
face_ids_live_Detected = []  # This list will store faceIds from detected faces
list_of_face_rectangles = []
face_rect_counter = 0

while True:
    ret, frame = vid.read()
    check, buffer = cv2.imencode('.jpg', frame)
    img = cv2.imencode('.jpg', frame)[1].tobytes()
    base64_encoded = base64.b64encode(buffer).decode()
    print(type(img))
    detected_faces = utils.detect_face_stream(endpoint=ENDPOINT, key=KEY, image=img,
                                              face_attributes=attributes,
                                              recognition_model='recognition_03')
    print('Image num {} face detected {}'.format(count, detected_faces))
    count += 1
    color = (255, 0, 0)
    thickness = 2
    for face in detected_faces:
        detected_face_id = face['faceId']
        face_ids_live_Detected.append(detected_face_id)
        detected_face_rectangle = face['faceRectangle']
        list_of_face_rectangles.append(detected_face_rectangle)
        print("detected rectangle =", detected_face_rectangle)
        face_rect_for_facelist = list_of_face_rectangles[face_rect_counter]
        face_rect_counter += 1
        frame = cv2.rectangle(frame, *utils.get_rectangle(face), color, thickness)
    cv2.imshow('frame', frame)
    for face_id_live in face_ids_live_Detected:
        similar_faces = face_client.face.find_similar(face_id=face_id_live,
                                                      face_list_id=face_list_id)
        if not similar_faces:
            print('No similar faces found!')
            print('Adding Unknown Face to FaceList...')
            facelist_result = utils.facelist_add(endpoint=ENDPOINT, key=KEY,
                                                 face_list_id=face_list_id,
                                                 data=img, params=face_rect_for_facelist)
            persisted_face_id = facelist_result['persistedFaceId']
        else:
            print('Similar Face Found!')
            for similar_face in similar_faces:
                face_id_similar = similar_face.face_id
                print("Confidence: " + str(similar_face.confidence))
From my utils file, the code for the facelist_add function is as follows:
def facelist_add(endpoint, key, face_list_id, data=None, json=None, headers=None, params=None, targetFace=None):
    # pylint: disable=too-many-arguments
    """Universal interface for request."""
    method = 'POST'
    url = endpoint + '/face/v1.0/facelists/' + face_list_id + '/persistedfaces'
    # Make it possible to call only with short name (without BaseUrl).
    if not url.startswith('https://'):
        url = BaseUrl.get() + url
    params = {}
    # Setup the headers with default Content-Type and Subscription Key.
    headers = headers or {}
    if 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/octet-stream'
    headers['Ocp-Apim-Subscription-Key'] = key
    params['detectionModel'] = 'detection_03'
    response = requests.request(
        method,
        url,
        params=params,
        data=data,
        json=json,
        headers=headers)
    if response.text:
        result = response.json()
    else:
        result = {}
    return result
When you have several faces in a picture, you have to provide a 'targetFace' in your call to AddFace:
A face rectangle to specify the target face to be added into the face
list, in the format of "targetFace=left,top,width,height". E.g.
"targetFace=10,10,100,100". If there is more than one face in the
image, targetFace is required to specify which face to add. No
targetFace means there is only one face detected in the entire image.
See API documentation for this method: https://westeurope.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250
Thanks to everyone who helped, and especially Nicolas R. I just found a mistake and corrected it, and now the program is working like a charm.
Azure Face Detect actually returns the face rectangle values in top,left,width,height order, which I was feeding directly to the FaceList's targetFace.
I just swapped the first two values of the face rectangle so that it becomes left,top,width,height, which is what the documentation specifies, and it works fine now.
Solution:
I added a new function that takes the faceRectangle dictionary and swaps the first two values.
list_of_faceRect_valuesonly = []

def face_rect_values(faceRect_dict):
    temp_list = []
    for key, value in faceRect_dict.items():
        temp_list.append(value)
    temp_list[0], temp_list[1] = temp_list[1], temp_list[0]
    list_of_faceRect_valuesonly.append(temp_list)
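As an alternative sketch (my suggestion, not part of the original fix), reading the keys by name avoids depending on the dictionary's insertion order altogether:
def rect_to_target_face(face_rect):
    # Build the [left, top, width, height] list the targetFace
    # parameter expects by accessing the faceRectangle keys explicitly.
    return [face_rect['left'], face_rect['top'],
            face_rect['width'], face_rect['height']]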
In order to extract the values from the list, I did the following:
face_rect_counter=0
face_rect_for_facelist = list_of_faceRect_valuesonly[face_rect_counter]
face_rect_counter +=1
The request to the facelist_add function:
facelist_result = utils.facelist_add(endpoint=ENDPOINT, key=KEY, face_list_id=face_list_id, targetFace=face_rect_for_facelist, data=img)
I have also changed my facelist_add function a little bit:
def facelist_add(endpoint, key, face_list_id, targetFace=[], data=None, jsondata=None, headers=None):
    # pylint: disable=too-many-arguments
    """Universal interface for request."""
    method = 'POST'
    url = endpoint + '/face/v1.0/facelists/' + face_list_id + '/persistedfaces'
    # Make it possible to call only with short name (without BaseUrl).
    if not url.startswith('https://'):
        url = BaseUrl.get() + url
    # Setup the headers with default Content-Type and Subscription Key.
    headers = headers or {}
    if 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/octet-stream'
    headers['Ocp-Apim-Subscription-Key'] = key
    # Serialize the rectangle as "left,top,width,height" for the query string.
    params = {'targetFace': ','.join(map(str, targetFace))}
    print("Printing TargetFaces(facelist_add function) ...", params['targetFace'])
    params['detectionModel'] = 'detection_03'
    response = requests.post(url, params=params, data=data, headers=headers)
    print("Request URL: ", response.url)
    # Prevent response.json() from complaining about an empty response.
    if response.text:
        result = response.json()
    else:
        result = {}
    return result
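Putting the pieces together, the add-to-list branch of the detection loop would then look roughly like this (a sketch assuming the same variables and utils module as above):
for face in detected_faces:
    face_rect_values(face['faceRectangle'])  # fills list_of_faceRect_valuesonly

for face_id_live in face_ids_live_Detected:
    similar_faces = face_client.face.find_similar(face_id=face_id_live,
                                                  face_list_id=face_list_id)
    if not similar_faces:
        # One rectangle per detected face, consumed in order.
        face_rect_for_facelist = list_of_faceRect_valuesonly[face_rect_counter]
        face_rect_counter += 1
        facelist_result = utils.facelist_add(endpoint=ENDPOINT, key=KEY,
                                             face_list_id=face_list_id,
                                             targetFace=face_rect_for_facelist,
                                             data=img)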

Linkedin web scraping snippet

I'm doing a web-scraping data university research project. I started from a ready-made GitHub project, but that project does not retrieve all the data.
The project works like this:
Search Google using keywords: example: (accountant 'email me at' Google)
Extract a snippet.
Retrieve data from this snippet.
The issue is:
The extracted snippets look like this: " ... marketing division in 2009. For more information on career opportunities with our company, email me: vicki@productivedentist.com. Neighborhood Smiles, LLC ..."
The snippet does not show everything; the "..." hides information such as role, location, and so on. How can I retrieve all of the information with the script?
from googleapiclient.discovery import build  # For using the Google Custom Search Engine API.
import datetime as dt  # Importing the system date for naming the output file.
import sys
from xlwt import Workbook  # For working on the xls file.
import re  # For email search using a regex.

if __name__ == '__main__':
    # Create an output file name in the format "srch_res_yyyyMMdd_hhmmss.xls" in the output folder.
    now_sfx = dt.datetime.now().strftime('%Y%m%d_%H%M%S')
    output_dir = './output/'
    output_fname = output_dir + 'srch_res_' + now_sfx + '.xls'
    search_term = sys.argv[1]
    num_requests = int(sys.argv[2])
    my_api_key = "replace_with_your_api_key"  # Read readme.md to learn how to get your API key.
    my_cse_id = "011658049436509675749:gkuaxghjf5u"  # Google CSE which searches possible LinkedIn profiles according to the query.
    service = build("customsearch", "v1", developerKey=my_api_key)
    wb = Workbook()
    sheet1 = wb.add_sheet(search_term[0:15])
    wb.save(output_fname)
    sheet1.write(0, 0, 'Name')
    sheet1.write(0, 1, 'Profile Link')
    sheet1.write(0, 2, 'Snippet')
    sheet1.write(0, 3, 'Present Organisation')
    sheet1.write(0, 4, 'Location')
    sheet1.write(0, 5, 'Role')
    sheet1.write(0, 6, 'Email')
    sheet1.col(0).width = 256 * 20
    sheet1.col(1).width = 256 * 50
    sheet1.col(2).width = 256 * 100
    sheet1.col(3).width = 256 * 20
    sheet1.col(4).width = 256 * 20
    sheet1.col(5).width = 256 * 50
    sheet1.col(6).width = 256 * 50
    wb.save(output_fname)
    row = 1  # To insert the data in the next row.

    # Function to perform the Google search.
    def google_search(search_term, cse_id, start_val, **kwargs):
        res = service.cse().list(q=search_term, cx=cse_id, start=start_val, **kwargs).execute()
        return res

    for i in range(0, num_requests):
        # This is the offset from the beginning to start getting the results from.
        start_val = 1 + (i * 10)
        # Make an HTTP request object.
        results = google_search(search_term,
                                my_cse_id,
                                start_val,
                                num=10  # num can be 1 to 10; it sets the number of results per request.
                                )
        for profile in range(0, 10):
            snippet = results['items'][profile]['snippet']
            myList = [item for item in snippet.split('\n')]
            newSnippet = ' '.join(myList)
            contain = re.search(r'[\w\.-]+@[\w\.-]+', newSnippet)
            if contain is not None:
                title = results['items'][profile]['title']
                link = results['items'][profile]['link']
                org = "-NA-"
                location = "-NA-"
                role = "-NA-"
                if 'person' in results['items'][profile]['pagemap']:
                    if 'org' in results['items'][profile]['pagemap']['person'][0]:
                        org = results['items'][profile]['pagemap']['person'][0]['org']
                    if 'location' in results['items'][profile]['pagemap']['person'][0]:
                        location = results['items'][profile]['pagemap']['person'][0]['location']
                    if 'role' in results['items'][profile]['pagemap']['person'][0]:
                        role = results['items'][profile]['pagemap']['person'][0]['role']
                print(title[:-23])
                sheet1.write(row, 0, title[:-23])
                sheet1.write(row, 1, link)
                sheet1.write(row, 2, newSnippet)
                sheet1.write(row, 3, org)
                sheet1.write(row, 4, location)
                sheet1.write(row, 5, role)
                sheet1.write(row, 6, contain[0])
                print('Wrote {} search result(s)...'.format(row))
                wb.save(output_fname)
                row = row + 1
    print('Output file "{}" written.'.format(output_fname))
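Since the "..." elision happens on Google's side, the hidden text can only be recovered by fetching each result's link and searching the page itself (when the page is publicly accessible; LinkedIn profiles generally are not for anonymous clients). A minimal sketch of that idea; the helper name is hypothetical, and a real parser such as BeautifulSoup would be more robust than the crude tag stripping used here:
import re
import requests
from html import unescape

def fetch_full_text(url, timeout=10):
    # Download the result page and flatten it to visible text so the
    # same regexes can run over the full content, not just the snippet.
    resp = requests.get(url, timeout=timeout,
                        headers={'User-Agent': 'Mozilla/5.0'})
    resp.raise_for_status()
    text = re.sub(r'<[^>]+>', ' ', resp.text)   # crude tag stripping
    return unescape(re.sub(r'\s+', ' ', text))
The email search in the script could then fall back to re.search(r'[\w\.-]+@[\w\.-]+', fetch_full_text(link)) whenever the snippet match fails.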

Server side previous page call not working

I configured server-side processing for DataTables. On the server side I use Python 3 and MongoDB.
I think my paging logic is sound, as you can see from the code:
PYTHON:
@bp.route('/_ajax_products', methods=['GET', 'POST'])
@login_required
def ajax_products():
    num = int(request.args.get('page_num')) + 1
    total_items = product_db.count()
    items_to_show = 100
    result = {"draw": num, "recordsTotal": total_items, "recordsFiltered": total_items}
    list_prod = product_db.find().sort([("Code", 1)]).skip(items_to_show * (num - 1)).limit(items_to_show)
    final_list = []
    for i in list_prod:
        # iteration on products and addition to the final list
    result['data'] = final_list
    return jsonify(result)
DATATABLE INITIALISATION:
$('#ProductsList').DataTable({
    "dom": 'Brlf<t><"clear">p',
    "pageLength": 100,
    select: true,
    "processing": true,
    "serverSide": true,
    "ajax": {
        url: "/_ajax_products",
        data: function (d) {
            var datatable = $('#ProductsList').DataTable();
            var currentPage = datatable.page.info().page;
            d.page_num = currentPage;
        }
    },
    "columns": [...]
    ...
})
The data loads fine into my DataTable, and there is no problem when I move to the next page.
The problem appears when I call a previous page.
The display starts on page 1. When I press, for example, pagination button 3, I can see in my console:
"GET /_ajax_products?draw=3&
But when I try to go back to page 1, the draw parameter goes to 4:
"GET /_ajax_products?draw=4&
... and it keeps incrementing.
On the server side the right data is found, but it is not displayed in the DataTable.
How can I solve this problem?
I finally found a solution. The error came from a misunderstanding of what the draw option does.
I thought the value of draw corresponded to the page to be displayed, which is not the case.
Here is the new version of the code in case it can help someone:
@bp.route('/_ajax_products', methods=['GET', 'POST'])
@login_required
def ajax_products():
    num = int(request.args.get('page_num')) + 1
    total_items = product_db.count()
    items_to_show = 100
    result = {"recordsTotal": total_items, "recordsFiltered": total_items}
    list_prod = product_db.find().sort([("Code", 1)]).skip(items_to_show * (num - 1)).limit(items_to_show)
    final_list = []
    for i in list_prod:
        # iteration on products and addition to the final list
    result['data'] = final_list
    return jsonify(result)
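For reference, the DataTables server-side protocol expects draw to be an echo of the value DataTables sent with the request (cast to int), not a server-maintained counter; a sketch of the same handler with that echo added (same Flask/MongoDB setup assumed, product serialization elided as above; serialize() is a hypothetical helper):
@bp.route('/_ajax_products', methods=['GET', 'POST'])
@login_required
def ajax_products():
    num = int(request.args.get('page_num')) + 1
    total_items = product_db.count()
    items_to_show = 100
    result = {
        # Echo back the draw value from the request; the int() cast
        # prevents injected content from being reflected to the client.
        "draw": int(request.args.get('draw', 1)),
        "recordsTotal": total_items,
        "recordsFiltered": total_items,
    }
    list_prod = product_db.find().sort([("Code", 1)]).skip(items_to_show * (num - 1)).limit(items_to_show)
    result['data'] = [serialize(p) for p in list_prod]  # serialize() is hypothetical
    return jsonify(result)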

Updating multiple line plots dynamically in callback in bokeh

I have a use case with multiple line plots (with legends), and I need to update the line plots based on a column condition. Below is an example with two data sets; based on the country, the column data source changes. The issue I am facing is that the number of columns in the data source is not fixed, and even the types can vary. So, when I update the data source in a callback after a new country is selected, I get this error:
Error: attempted to retrieve property array for nonexistent field 'pay_conv_7d.content'.
I am guessing this is because the pay_conv_7d.content column doesn't exist in the new data source, while those lines were already in my plot. I have been trying to fix this in various ways (making common columns for every country selection, adding the missing column to the data source in the callback), but I still get issues.
Is there any clean way to have multiple line plots update in a callback, without a lot of hacks? Any insights or help would be really appreciated. Thanks much in advance! :)
def setup_multiline_plots(x_axis, y_axis, title_text, data_source, plot):
    num_categories = len(data_source.data['categories'])
    legends_list = list(data_source.data['categories'])
    colors_list = Spectral11[0:num_categories]
    # xs = [data_source.data['%s.' % x_axis].values] * num_categories
    # ys = [data_source.data[('%s.%s') % (y_axis, column)] for column in data_source.data['categories']]
    # data_source.data['x_series'] = xs
    # data_source.data['y_series'] = ys
    # plot.multi_line('x_series', 'y_series', line_color=colors_list, legend='categories', line_width=3, source=data_source)
    plot_list = []
    for (colr, leg, column) in zip(colors_list, legends_list, data_source.data['categories']):
        xs, ys = '%s.' % x_axis, ('%s.%s') % (y_axis, column)
        plot.line(xs, ys, source=data_source, color=colr, legend=leg, line_width=3, name=ys)
        plot_list.append(ys)
    data_source.data['plot_names'] = data_source.data.get('plot_names', []) + plot_list
    plot.title.text = title_text

def update_plot(country, timeseries_df, timeseries_source,
                aggregate_df, aggregate_source, category,
                plot_pay_7d, plot_r_pay_90d):
    aggregate_metrics = aggregate_df.loc[aggregate_df.country == country]
    aggregate_metrics = aggregate_metrics.nlargest(10, 'cost')
    category_types = list(aggregate_metrics[category].unique())
    timeseries_df = timeseries_df[timeseries_df[category].isin(category_types)]
    timeseries_multi_line_metrics = get_multiline_column_datasource(timeseries_df, category, country)
    # len_series = len(timeseries_multi_line_metrics.data['time.'])
    # previous_legends = timeseries_source.data['plot_names']
    # current_legends = timeseries_multi_line_metrics.data.keys()
    # common_legends = list(set(previous_legends) & set(current_legends))
    # additional_legends_list = list(set(previous_legends) - set(current_legends))
    # for legend in additional_legends_list:
    #     zeros = pd.Series(np.array([0] * len_series), name=legend)
    #     timeseries_multi_line_metrics.add(zeros, legend)
    # timeseries_multi_line_metrics.data['plot_names'] = previous_legends
    timeseries_source.data = timeseries_multi_line_metrics.data
    aggregate_source.data = aggregate_source.from_df(aggregate_metrics)

def get_multiline_column_datasource(df, category, country):
    df_country = df[df.country == country]
    df_pivoted = pd.DataFrame(df_country.pivot_table(index='time', columns=category, aggfunc=np.sum).reset_index())
    df_pivoted.columns = df_pivoted.columns.to_series().str.join('.')
    categories = list(set([column.split('.')[1] for column in list(df_pivoted.columns)]))[1:]
    data_source = ColumnDataSource(df_pivoted)
    data_source.data['categories'] = categories
    return data_source  # return implied by the call in update_plot above
Recently I had to update data on a MultiLine glyph; check my question if you want to take a look at my algorithm.
I think you can update a ColumnDataSource in at least three ways:
You can create a dataframe to instantiate a new CDS:
cds = ColumnDataSource(df_pivoted)
data_source.data = cds.data
You can create a dictionary and assign it to the data attribute directly
d = {
    'xs0': [[7.0, 986.0], [17.0, 6.0], [7.0, 67.0]],
    'ys0': [[79.0, 69.0], [179.0, 169.0], [729.0, 69.0]],
    'xs1': [[17.0, 166.0], [17.0, 116.0], [17.0, 126.0]],
    'ys1': [[179.0, 169.0], [179.0, 1169.0], [1729.0, 169.0]],
    'xs2': [[27.0, 276.0], [27.0, 216.0], [27.0, 226.0]],
    'ys2': [[279.0, 269.0], [279.0, 2619.0], [2579.0, 2569.0]]
}
data_source.data = d
Here, if you need different column sizes or empty columns, you can fill the gaps with NaN values in order to keep the column sizes consistent. And I think this is the solution to your question:
import numpy as np

d = {
    'xs0': [[7.0, 986.0], [17.0, 6.0], [7.0, 67.0]],
    'ys0': [[79.0, 69.0], [179.0, 169.0], [729.0, 69.0]],
    'xs1': [[17.0, 166.0], [np.nan], [np.nan]],
    'ys1': [[179.0, 169.0], [np.nan], [np.nan]],
    'xs2': [[np.nan], [np.nan], [np.nan]],
    'ys2': [[np.nan], [np.nan], [np.nan]]
}
data_source.data = d
Or, if you only need to modify a few values, you can use the patch method; check the documentation here.
The following example shows how to patch entire column elements. In this case,
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))

patches = {
    'foo': [(slice(2), [11, 12])],
    'bar': [(0, 101), (2, 301)],
}

source.patch(patches)
After this operation, the value of the source.data will be:
dict(foo=[11, 12, 30], bar=[101, 200, 301])
NOTE: It is important to make the update in one go to avoid performance issues
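To make that note concrete: build the complete new mapping first and assign it to .data once, so BokehJS receives a single change event instead of one per column; a minimal sketch (compute_column and column_names are hypothetical placeholders):
# Preferred: one assignment, one re-render.
new_data = {name: compute_column(name) for name in column_names}
data_source.data = new_data

# Avoid: mutating the live dict column by column triggers
# a separate update for each key.
# for name in column_names:
#     data_source.data[name] = compute_column(name)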

Assign Class attributes from list elements

I'm not sure if the title accurately describes what I'm trying to do. I have a Python 3.x script that posts a flood warning to my Facebook page when the river near my home has reached its lowest flood stage. Right now the script works, but it only reports data from one measuring station. I would like to process the data from all of the stations in my county (5 in total), so I was thinking that a class might do the trick, but I'm not sure how to implement it. I've been teaching myself Python since January and feel pretty comfortable with the language for the most part; while I have a good idea of how to build a class object, I'm not sure what my flow chart should look like. Here is the code now:
#!/usr/bin/env python3
'''
Facebook Flood Warning Alert System - this script will post a notification to
Facebook whenever the Sabine River @ Hawkins reaches flood stage (22.3')
'''
import requests
import facebook
from lxml import html

graph = facebook.GraphAPI(access_token='My_Access_Token')
river_url = 'http://water.weather.gov/ahps2/river.php?wfo=SHV&wfoid=18715&riverid=203413&pt%5B%5D=147710&allpoints=143204%2C147710%2C141425%2C144668%2C141750%2C141658%2C141942%2C143491%2C144810%2C143165%2C145368&data%5B%5D=obs'
ref_url = 'http://water.weather.gov/ahps2/river.php?wfo=SHV&wfoid=18715&riverid=203413&pt%5B%5D=147710&allpoints=143204%2C147710%2C141425%2C144668%2C141750%2C141658%2C141942%2C143491%2C144810%2C143165%2C145368&data%5B%5D=all'

def checkflood():
    r = requests.get(river_url)
    tree = html.fromstring(r.content)
    stage = ''.join(tree.xpath('//div[@class="stage_stage_flow"]//text()'))
    warn = ''.join(tree.xpath('//div[@class="current_warns_statmnts_ads"]/text()'))
    stage_l = stage.split()
    level = float(stage_l[2])
    # check if we're at flood level
    if level < 22.5:
        pass
    elif level == 37:
        major_diff = level - 23.0
        major_r = ('The Sabine River near Hawkins, Tx has reached [Major Flood Stage] @ ', stage_l[2], ' Ft. ', str(round(major_diff, 2)), ' Ft. \n Please click the link for more information.\n\n Current Warnings and Alerts:\n ', warn)
        major_p = ''.join(major_r)
        graph.put_object(parent_object='me', connection_name='feed', message=major_p, link=ref_url)
    <--snip-->

checkflood()
Each station has different categories for flood stage - Action, Flood, Moderate, Major - with different depths per station. So for the Sabine River at Hawkins it is Action - 22', Flood - 24', Moderate - 28', Major - 32'. For the other stations those depths are different. So I know that I'll have to start out with something like:
class River:
    def __init__(self, id, stage):
        self.id = id        # station ID
        self.stage = stage  # river level

    @staticmethod
    def check_flood(stage):
        if stage < 22.5:
            pass
        elif stage.....
but from there I'm not sure what to do. Where should it be added into the code? Should I write a class to handle the Facebook postings as well? Is this even something that needs a class to handle it? Is there any way to clean this up for efficiency? I'm not looking for anyone to write this up for me, but some tips and pointers would sure be helpful. Thanks everyone!
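One pattern worth sketching for this kind of per-station data (my suggestion, using the Hawkins depths from the question; the other stations' numbers would differ): keep each station's thresholds in a dict on the instance, so the check logic is written once:
class River:
    def __init__(self, name, stage, thresholds):
        self.name = name
        self.stage = stage
        self.thresholds = thresholds  # e.g. {'action': 22.0, 'flood': 24.0, ...}

    def category(self):
        # Return the most severe stage the river has reached, or None.
        for label in ('major', 'moderate', 'flood', 'action'):
            if self.stage >= self.thresholds[label]:
                return label
        return None

hawkins = River('Sabine River near Hawkins', 22.3,
                {'action': 22.0, 'flood': 24.0, 'moderate': 28.0, 'major': 32.0})
print(hawkins.category())  # 'action'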
EDIT: Here is what I figured out, and it is working:
class River:
    name = ""
    stage = ""
    action = ""
    flood = ""
    mod = ""
    major = ""
    warn = ""

    def checkflood(self):
        if float(self.stage) < float(self.action):
            pass
        elif float(self.stage) >= float(self.major):
            <--snip-->

mineola = River()
mineola.name = stations[0]
mineola.stage = stages[0]
mineola.action = "13.5"
mineola.flood = "14.0"
mineola.mod = "18.0"
mineola.major = "21.0"
mineola.alert = warn[0]

hawkins = River()
hawkins.name = stations[1]
hawkins.stage = stages[1]
hawkins.action = "22.5"
hawkins.flood = "23.0"
hawkins.mod = "32.0"
hawkins.major = "37.0"
hawkins.alert = warn[1]
<--snip-->
So from here I'm trying to stick all the individual river blocks into one block. What I have tried so far is this:
>>> class River:
...     name = ""
...     stage = ""
...     def testcheck(self):
...         return self.name, self.stage
...
>>> for n in range(num_river):
...     stations[n] = River()
...     stations[n].name = stations[n]
...     stations[n].stage = stages[n]
...
>>> for n in range(num_river):
...     stations[n].testcheck()
...
<__main__.River object at 0x7fbea469bc50> 4.13
<__main__.River object at 0x7fbea46b4748> 20.76
<__main__.River object at 0x7fbea46b4320> 22.13
<__main__.River object at 0x7fbea46b4898> 16.08
So this doesn't give me the printed results that I was expecting. How can I return the string instead of the object? Will I be able to define the class attributes in this manner, or will I have to list them out individually? Thanks again!
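(A note on the output above: stations[n] = River() replaces the station name in the list with the new River object, and stations[n].name = stations[n] then points name at that same object, which is why the default repr is printed instead of the string. A minimal sketch that keeps the names, with variable names of my choosing:)
station_objects = []
for name, stage in zip(stations, stages):
    r = River()
    r.name = name    # keep the original string instead of overwriting the list
    r.stage = stage
    station_objects.append(r)

for r in station_objects:
    print(*r.testcheck())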
After reading many, many, many articles and tutorials on class objects, I was able to come up with a solution for creating the objects from list elements.
class River():
    def __init__(self, river, stage, flood, action):
        self.river = river
        self.stage = stage
        self.flood = flood
        self.action = action

    def alerts(self):
        if float(self.stage) < float(self.flood):
            # alert = "The %s is below Flood Stage (%sFt) @ %s Ft. \n" % (self.river, self.flood, self.stage)
            pass
        elif float(self.stage) > float(self.flood):
            alert = "The %s has reached Flood Stage (%sFt) @ %sFt. Warnings: %s \n" % (self.river, self.flood, self.stage, self.action)
            return alert

'''this is the function that I was trying to create
to build the class objects automagically'''
def riverlist():
    river_list = []
    for n in range(len(rivers)):
        station = River(rivers[n], stages[n], floods[n], warns[n])
        river_list.append(station)
    return river_list

if __name__ == '__main__':
    for x in riverlist():
        print(x.alerts())
