Hey guys, I am trying to select my favorite person from the table by their first name and then print out their first and middle name. I am also trying to make sure to parameterize the query. When I try to run the code it raises KeyError: 'first_name' in the for loop over assignmentRecords towards the bottom. Is this even a step in the right direction, or totally wrong?
# =============#
# imports #
# =============#
import sqlite3 #for working with sqlite version 3 databases
# ===============#
# constants #
# ===============#
SQL_FILE = "assignment_6.01-JonathanMaldonado.db"
SQL_CREATE_TABLE = """
CREATE TABLE person
(
person_id INTEGER PRIMARY KEY,
first_name TEXT,
middle_name TEXT,
age TEXT,
favorite_thing TEXT
)"""
SQL_DELETE_ASSIGNMENT = """
DELETE FROM person
WHERE person_id = ?
"""
SQL_INSERT_ASSIGNMENT = """
INSERT INTO person
(first_name,middle_name,age,favorite_thing)
VALUES
(?,?,?,?)
"""
SQL_SELECT_ASSIGNMENTS = """
SELECT
person_id,
first_name,
middle_name,
age,
favorite_thing
FROM person
"""
SQL_UPDATE_ASSIGNMENT = """
UPDATE person
SET age = ?
WHERE person_id = ?
"""
# ===============#
# functions #
# ===============#
def createPersonTable(dbConnection):
""" creates an assignment database table """
#fill in the function to create the table
#the SQL statement is provided as a constant
dbCursor = dbConnection.cursor()
dbCursor.execute(SQL_CREATE_TABLE)
dbCursor.close()
def deleteAssignmentByID(dbConnection,personID):
""" deletes assignment records by their ID """
#fill in the function to delete a record from the assignment table
#the SQL statement is provided as a constant
#be sure to parameterize as needed and check that 1 record was deleted
dbCursor = dbConnection.cursor()
arguments = [
personID,
]
dbCursor.execute(SQL_DELETE_ASSIGNMENT,arguments)
#make sure the record was deleted successfully
numberOfRecordsDeleted = dbCursor.rowcount
if(numberOfRecordsDeleted != 1):
errorMessage = "{} records were deleted when there should have been 1"
raise RuntimeError(errorMessage.format(numberOfRecordsDeleted))
#close the cursor
dbCursor.close()
def insertAssignment(dbConnection,person):
""" inserts assignment records one at a time """
#fill in the function to insert a record
#the SQL statement is provided as a constant
#be sure to parameterize as needed and check that 1 record was inserted
dbCursor = dbConnection.cursor()
arguments = [
person["first_name"],
person["middle_name"],
person["age"],
person["favorite_thing"],
]
dbCursor.execute(SQL_INSERT_ASSIGNMENT,arguments)
#make sure the record was inserted successfully
numberOfRecordsInserted = dbCursor.rowcount
if(numberOfRecordsInserted != 1):
errorMessage = "{} records were inserted when there should have been 1"
raise RuntimeError(errorMessage.format(numberOfRecordsInserted))
#close the cursor
dbCursor.close()
def selectAllAssignments(dbConnection):
""" returns a list of all assignment records """
#fill in the function to return a list of assignment records
#the SQL statement is provided as a constant
dbCursor = dbConnection.cursor()
dbCursor.execute(SQL_SELECT_ASSIGNMENTS)
#build list of assignment records
persons = []
for record in dbCursor:
person = {
"person_id": record[0],
" first_name": record[1],
"middle_name": record[2],
"age": record[3],
"favorite_thing": record[4]
}
persons.append(person)
#close the cursor and return the list
dbCursor.close()
return persons
# ==========#
# main #
# ==========#
def main():
""" main method """
#connect to the database using the file name provided as a constant
dbConnection = sqlite3.connect(SQL_FILE)
#dropping table in case you need to re-run this multiple times
#no need to change this part
dbCursor = dbConnection.cursor()
try:
dbCursor.execute("DROP TABLE IF EXISTS person")
except Exception as e:
dbConnection.close()
raise #stop the rest of the script
#create the assignment table
#use the createAssignmentTable function and wrap in an appropriate try block
try:
createPersonTable(dbConnection)
except Exception as e:
#close the connection and stop
dbConnection.close()
raise #stop the rest of the script
#loop through the following assignment records and insert them
#use the insertAssignment function and wrap in an appropriate try block
persons = [
{
"first_name": "Demi",
"middle_name": "Rose",
"age": "22",
"favorite_thing": "Cute and funny",
},
{
"first_name": "Esmeralda",
"middle_name": "Arciga",
"age": "48",
"favorite_thing": "Lovly Mother",
},
{
"first_name": "Dead",
"middle_name": "Pool",
"age": "32",
"favorite_thing": "Superhero",
}
]
for person in persons:
try:
insertAssignment(dbConnection,person)
except Exception as e:
#roll back the transaction and stop
dbConnection.rollback()
dbConnection.close()
raise #stop the rest of the script
else:
#commit the transaction
dbConnection.commit()
#select all of the assignment records and store in a variable
#use the selectAllAssignments function and wrap in an appropriate try block
try:
assignmentRecords = selectAllAssignments(dbConnection)
except Exception as e:
#roll back the transaction and stop
dbConnection.rollback()
dbConnection.close()
raise #stop the rest of the script
else:
pass #no need to commit since it was just a select
#loop through the assignment records
#print the title and due date of each and then delete that record
#use the deleteAssignmentByID function and wrap in an appropriate try block
for person in assignmentRecords:
try:
print("{} {} is {} old and {}".format(person["first_name"],person["middle_name"],person["age"],person["favorite_thing"]))
deleteAssignmentByID(dbConnection,person["person_id"])
except Exception as e:
#roll back the transaction and stop
dbConnection.rollback()
dbConnection.close()
raise #stop the rest of the script
else:
#commit the transaction
dbConnection.commit()
#close the database connection
dbConnection.close()
# kick off main
if(__name__ == "__main__"):
main()
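For reference, this is roughly the parameterized select-by-first-name I am aiming for once the above works; the constant name and the selectPersonByFirstName helper are just placeholders I made up, and this part is untested:
SQL_SELECT_PERSON_BY_FIRST_NAME = """
SELECT
    first_name,
    middle_name
FROM person
WHERE first_name = ?
"""
def selectPersonByFirstName(dbConnection, firstName):
    """ returns (first_name, middle_name) rows for the given first name """
    dbCursor = dbConnection.cursor()
    #pass the first name as a bound parameter instead of formatting it into the SQL
    dbCursor.execute(SQL_SELECT_PERSON_BY_FIRST_NAME, [firstName])
    rows = dbCursor.fetchall()
    dbCursor.close()
    return rows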
I have to process an item report CSV file every hour. The CSV contains 150k+ records for one account, and there are multiple accounts in my system. I previously worked in Rails, where there was an Active Record gem that handled this use case very efficiently. I am looking for an alternative to that gem in Django, or any built-in method that will help import such large data in bulk.
So far I have tried this code.
class ItemReportService:
def call(self, file_url):
with open(file_url, 'r') as file:
reader = csv.DictReader(file)
products = []
for row in reader:
product = self.process_product(row)
products.append(product)
self.update_products(products)
def process_product(self, row):
print(f'Processing sku: {row["SKU"]}')
product = Product.objects.filter(
sku=row['SKU']).first() or Product(sku=row['SKU'])
product.listing_title = row['Product Name']
product.listed_price = row['Price']
product.buy_box_price = row['Buy Box Item Price'] + \
row['Buy Box Shipping Price']
product.status = row['Lifecycle Status']
return product
def update_products(self, products):
Product.objects.bulk_update(
products,
[
'listing_title',
'listed_price',
'buy_box_price',
'Lifecycle Status'
]
)
It is raising this exception because, when there is a new product, it doesn't have a primary key assigned to it yet:
ValueError: All bulk_update() objects must have a primary key set.
Django 4.1 has new parameters for bulk_create(): update_conflicts (bool) and update_fields (list).
If your model has a UNIQUE field, Django would usually ignore a conflicting row when creating new data. But if you set the update_conflicts parameter to True, the fields listed in update_fields will be updated instead.
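A minimal sketch of that upsert for the Product model in the question, assuming sku is declared unique on the model, the backend supports update_conflicts together with unique_fields (e.g. PostgreSQL or SQLite), and products holds unsaved Product instances built straight from the CSV rows:
# Insert new rows and update existing ones in a single query (Django 4.1+).
Product.objects.bulk_create(
    products,
    update_conflicts=True,
    unique_fields=["sku"],  # the unique column conflicts are detected on
    update_fields=["listing_title", "listed_price", "buy_box_price", "status"],
)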
You are not saving the product in the database before applying bulk_update().
I have checked your code; for this purpose you can use bulk_create() with an additional parameter:
Model.objects.bulk_create(self.data, ignore_conflicts=True)
or
columns = ['column1', 'column2']
items_to_be_inserted = []
for row in reader:  # rows from the CSV DictReader
    obj = Model.objects.filter(column1=row["column1"]).first()
    if not obj:
        obj = Model.objects.create(column1=row["column1"])
    obj.column1 = row["column1"] or obj.column1
    obj.column2 = row["column2"] or obj.column2
    items_to_be_inserted.append(obj)
In the end, you can do the bulk update like this:
Model.objects.bulk_update(items_to_be_inserted, columns)
This will solve your problem.
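With 150k+ rows per account it may also be worth chunking the write; bulk_update() accepts a batch_size argument, so something like the following (1000 is an arbitrary choice) keeps each statement a manageable size:
Model.objects.bulk_update(items_to_be_inserted, columns, batch_size=1000)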
I made this class method, which can be used on any Django model in a project.
from django.db import models
class BaseModel(models.Model):
@classmethod
def bulk_create_or_update(
cls, uniques: list[str],
defaults: list[str],
data: list[dict]
):
# Get existing object list
data_dict, select = {}, None
for entry in data:
sub_entry, key = {}, ''
for uniq in uniques:
sub_entry[uniq] = entry[uniq]
key += str(entry[uniq])
data_dict[key] = entry
if not select:
select = models.Q(**sub_entry)
continue
select |= models.Q(**sub_entry)
records = cls.objects.filter(select).values('pk', *uniques)
existing = {}
for rec in records:
key = ''
for uniq in uniques:
key += str(rec[uniq])
existing[key] = rec
# Split new objects from existing ones
to_create, to_update = [], []
for key, entry in data_dict.items():
obj = cls(**entry)
if key not in existing:
to_create.append(obj)
continue
obj.pk = existing[key]['pk']
to_update.append(obj)
cls.objects.bulk_create(to_create, batch_size=1000)
cls.objects.bulk_update(to_update, defaults, batch_size=1000)
Let's take a usage example:
class Product(BaseModel):
price = models.IntegerField()
name = models.CharField(max_length=128, unique=True)
status = models.CharField(max_length=128)
if __name__ == '__main__':
data = [
{'price': 50, 'name': 'p1', 'status': 'New'},
{'price': 33, 'name': 'p2', 'status': 'Old'}
]
Product.bulk_create_or_update(uniques=['name'], defaults=['price', 'status'], data=data)
Any suggestions for improving the code are welcome.
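One small suggestion in that spirit: if BaseModel exists only to host this helper, marking it abstract keeps Django from creating a table for it and avoids the implicit one-to-one link that multi-table inheritance would otherwise add to Product. A sketch, assuming that is the intent:
from django.db import models

class BaseModel(models.Model):
    class Meta:
        abstract = True  # helper base only; Django will not create a table for it

    # bulk_create_or_update(...) as defined above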
I'm unable to solve a problem with a search query in the database (sqlite3) in Tkinter. Parts of my code:
front.py
# Entries
self.name_text = tk.StringVar()
self.entry_name = tk.Entry(self.parent, textvariable=self.name_text)
self.entry_name.grid(row=3, column=1)
self.color_text = tk.StringVar()
self.combobox2=ttk.Combobox(self.parent, textvariable=self.color_text)
self.combobox2["values"] = ('red','blue','white')
self.labelCombobox=ttk.Label(self.parent, textvariable=self.color_text)
self.combobox2.grid(row=4, column=1)
self.parent.bind('<Return>',lambda e:refresh())
def search_command(self):
self.listBox.delete(0,tk.END)
for row in backend.database.search(self.name_text.get(),self.color_text.get()):
self.listBox.insert(tk.END, row)
backend.py
class database:
def search(name="",color=""):
try:
connect = sqlite3.connect("color.db")
cur = connect.cursor()
sql = "SELECT * FROM color WHERE name=? OR color=?"
values = (self, name_text.get(), color_text.get())
cur.execute(sql, values)
rows = cur.fetchall()
name_text.set(rows[1])
color_text.set(rows[2])
entry_name.configure('disabled')
combobox2.configure('disabled')
connect.close()
except:
messagebox.showinfo('nothing found!')
I also tried to put a self in another version of backend.py. This gives the same error.
def search(self, name="",color=""):
try:
self.connect = sqlite3.connect("color.db")
self.cur = self.connect.cursor()
self.sql = "SELECT * FROM color WHERE name=? OR color=?"
self.values = (self, name_text.get(), color_text.get())
self.cur.execute(sql, values)
self.rows = self.cur.fetchall()
self.name_text.set(rows[1])
self.color_text.set(rows[2])
self.entry_name.configure('disabled')
self.combobox2.configure('disabled')
self.connect.close()
except:
messagebox.showinfo('nothing!')
Please help solve the error:
for row in backend.database.search(self.name_text.get(),self.color_text.get()):
TypeError: 'NoneType' object is not iterable
There are a few issues in the backend.database.search() function:
name_text and color_text are undefined
passed arguments name and color should be used in values instead
it does not return any result (this is the cause of the error)
Below is a modified search() function:
def search(name="", color=""):
rows = () # assume no result in case of exception
try:
connect = sqlite3.connect("color.db")
cur = connect.cursor()
sql = "SELECT * FROM color WHERE name=? OR color=?"
values = (name, color) # use arguments name and color instead
cur.execute(sql, values)
rows = cur.fetchall()
connect.close()
except Exception as e:
print(e) # better to see what is wrong
messagebox.showinfo('nothing found!')
return rows # return result
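Since search() now just returns the rows, the Tkinter side can do the display work. A small sketch of search_command() reusing the widget names from the question; the rows[0][1] / rows[0][2] indexes mirror the question's own rows[1] / rows[2] usage, so they are an assumption about the column order in the color table:
def search_command(self):
    self.listBox.delete(0, tk.END)
    rows = backend.database.search(self.name_text.get(), self.color_text.get())
    for row in rows:
        self.listBox.insert(tk.END, row)
    if rows:
        # show the first match back in the entry and combobox variables
        self.name_text.set(rows[0][1])
        self.color_text.set(rows[0][2])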
The error TypeError: 'NoneType' object is not iterable means that your query is returning no rows.
That is at least partly because of this code:
sql = "SELECT * FROM color WHERE name=? OR color=?"
values = (self, name_text.get(), color_text.get())
cur.execute(sql, values)
This caused self to be used for the name parameter, and the result of name_text.get() will be associated with the color attribute. The result of color_text.get() is ignored.
You need to remove self - your sql uses two parameters so you need to send it two parameters.
The other problem appears to be that you're iterating over the results of search, but search doesn't return anything. You need to add a return statement to the search function.
My program stores "food" objects in a dictionary that is pickled and written to a csv file, which acts as a database. I want to retrieve individual food objects on command from the dictionary, but when I attempt to, I only seem to get the last object in the dictionary.
import pickle
class Food(object):
fooddict = dict({})
def __init__(self, name, weight, calories, time):
self.name = name
self.weight = weight
self.calories = calories
self.time = time
def __str__(self):
return '{self.name}s'.format(self=self) + \
' weigh {self.weight}'.format(self=self) + \
' ounces, contain {self.calories}'.format(self=self) + \
' calories, and stay fresh for {self.time}'.format(self=self) + \
' days.'
@classmethod
def createFoodInput(cls):
name = str(input("Enter the name: "))
weight = float(input("Enter the weight: "))
calories = float(input("Enter the calories: "))
time = float(input("Enter how many days it can store for: "))
return cls(name, weight, calories, time)
def storeFoodDict(f):
fooddict = Food.retreiveFoodDict()
if fooddict == "Empty File":
fooddict = dict({f.name: f})
with open("food.csv", 'wb') as filewriter:
try:
pickle.dump(fooddict, filewriter)
except:
print("Error storing pickled dictionary")
else:
food_found = False
for key in list(fooddict):
if key.__eq__(f.name):
print("Food already stored!")
food_found = True
if not food_found:
fooddict.update({f.name: f})
with open("food.csv", 'wb') as filewriter:
try:
pickle.dump(fooddict, filewriter)
except:
print("Error storing pickled dictionary")
@classmethod
def retreiveFoodDict(cls):
with open("food.csv", 'rb') as filereader:
try:
fooddict = pickle.load(filereader)
return fooddict
except EOFError:
return("Empty File")
def findFood(title):
fooddict = Food.retreiveFoodDict()
for key in list(fooddict):
if key.__eq__(title):
continue
return fooddict[key]
s = "apple"
n = findFood(s) #does not work, it returns banana instead of apple
#which is really just grabbing whatever is the
#last object in the dictionary
m = findFood("banana") #seems to work, but only because banana is the
#last object in the dictionary
print(n) #should print an apple "food object" but instead prints a banana
print(str(m.calories)) #works, but if I said n.calories it would still print
#m.calories instead
p = Food.retreiveFoodDict() #seems to work and retrieve the dictionary
print(str(p)) #also seems to work of course
Console Output:
bananas weigh 5.0 ounces, contain 120.0 calories, and stay fresh for 3.0 days.
120.0
{'apple': <__main__.Food object at 0x00D2C2E0>, 'banana': <__main__.Food object at 0x00D36D00>}
The dictionary contains 2 food objects (apple and banana), but the print(n) statement shows a banana, not an apple. Can anyone point out why this is or what I am misunderstanding? Thank you so much!
I found the answer to my own problem. I was misusing the continue in my findFood function.
This code solved my issues.
def getFood(food_name):
fooddict = Food.retreiveFoodDict()
for key in list(fooddict):
if key.__eq__(food_name):
return fooddict[key]
What this function does is simply retrieve the dictionary of objects from the csv file and iterate through the keys until the passed name is found. If it is found, the corresponding food object is returned. My original mistake was using the "continue" keyword, which skipped the matching key and made the loop return the object directly after the one we wanted.
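Since fooddict is keyed by the food name, the loop is not strictly needed either; a plain dictionary lookup does the same thing and returns None when the name is absent. A small alternative sketch reusing the names above:
def getFood(food_name):
    fooddict = Food.retreiveFoodDict()
    if not isinstance(fooddict, dict):  # retreiveFoodDict() returns "Empty File" on an empty file
        return None
    return fooddict.get(food_name)  # the stored Food object, or None if the name is missing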
Edit 12/07/19: The problem was not in fact with the pd.rename function, but with the fact that I did not return the pandas DataFrame from the function, and as a result the column change did not exist when printing, i.e.
def change_column_names(as_pandas, old_name, new_name):
as_pandas.rename(columns={old_name: new_name}, inplace=True)
return as_pandas <- This was missing*
Please see the user comment below and upvote them for finding this error for me.
Alternatively, you can continue reading.
The data can be downloaded from this link, but I have added a sample dataset. The file is not formatted as a typical CSV file; I believe it may have been an assessment piece related to the Hidden Decision Tree article. I have included the portion of the code that works around the issues with the format of the text file mentioned above and allows the user to rename the column.
The problem occurred when I tried to create a renaming function:
def change_column_names(as_pandas, old_name, new_name):
as_pandas.rename(columns={old_name: new_name}, inplace=True)
However, it seems to work when I hard-code the column names inside the rename function:
def change_column_names(as_pandas):
as_pandas.rename(columns={'Unique Pageviews': 'Page_Views'}, inplace=True)
return as_pandas
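For completeness, a variant that skips inplace entirely also works, as long as the caller keeps the returned frame (e.g. multi_sets = change_column_names(multi_sets)); this is just a sketch with the same column names:
def change_column_names(as_pandas, old_name='Unique Pageviews', new_name='Page_Views'):
    # Without inplace, rename returns a new DataFrame rather than modifying the original.
    return as_pandas.rename(columns={old_name: new_name})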
Sample Dataset
Title URL Date Unique Pageviews
oupUrl=tutorials 18-Apr-15 5608
"An Exclusive Interview with Data Expert, John Bottega" http://www.datasciencecentral.com/forum/topics/an-exclusive-interview-with-data-expert-john-bottega?groupUrl=announcements 10-Jun-14 360
Announcing Composable Analytics http://www.datasciencecentral.com/forum/topics/announcing-composable-analytics 15-Jun-14 367
Announcing the release of Spark 1.5 http://www.datasciencecentral.com/forum/topics/announcing-the-release-of-spark-1-5 12-Sep-15 156
Are Extreme Weather Events More Frequent? The Data Science Answer http://www.datasciencecentral.com/forum/topics/are-extreme-weather-events-more-frequent-the-data-science-answer 5-Oct-15 204
Are you interested in joining the University of California for an empiricalstudy on 'Big Data'? http://www.datasciencecentral.com/forum/topics/are-you-interested-in-joining-the-university-of-california-for-an 7-Feb-13 204
Are you smart enough to work at Google? http://www.datasciencecentral.com/forum/topics/are-you-smart-enough-to-work-at-google 11-Oct-15 3625
"As a software engineer, what's the best skill set to have for the next 5-10years?" http://www.datasciencecentral.com/forum/topics/as-a-software-engineer-what-s-the-best-skill-set-to-have-for-the- 12-Feb-16 2815
A Statistician's View on Big Data and Data Science (Updated) http://www.datasciencecentral.com/forum/topics/a-statistician-s-view-on-big-data-and-data-science-updated-1 21-May-14 163
A synthetic variance designed for Hadoop and big data http://www.datasciencecentral.com/forum/topics/a-synthetic-variance-designed-for-hadoop-and-big-data?groupUrl=research 26-May-14 575
A Tough Calculus Question http://www.datasciencecentral.com/forum/topics/a-tough-calculus-question 10-Feb-16 937
Attribution Modeling: Key Analytical Strategy to Boost Marketing ROI http://www.datasciencecentral.com/forum/topics/attribution-modeling-key-concept 24-Oct-15 937
Audience expansion http://www.datasciencecentral.com/forum/topics/audience-expansion 6-May-13 223
Automatic use of insights http://www.datasciencecentral.com/forum/topics/automatic-use-of-insights 27-Aug-15 122
Average length of dissertations by higher education discipline. http://www.datasciencecentral.com/forum/topics/average-length-of-dissertations-by-higher-education-discipline 4-Jun-15 1303
This is the full code that produces the KeyError:
def change_column_names(as_pandas):
as_pandas.rename(columns={'Unique Pageviews': 'Page_Views'}, inplace=True)
def change_column_names(as_pandas, old_name, new_name):
as_pandas.rename(columns={old_name: new_name}, inplace=True)
def change_column_names(as_pandas):
as_pandas.rename(columns={'Unique Pageviews': 'Page_Views'},
inplace=True)
def open_as_dataframe(file_name_in):
reader = pd.read_csv(file_name_in, encoding='windows-1251')
return reader
# Get each column of data including the heading and separate each element
# i.e. Title, URL, Date, Page Views
# and save to string_of_rows with comma separator for storage as a csv
# file.
def get_columns_of_data(*args):
# Function that accept variable length arguments
string_of_rows = str()
num_cols = len(args)
try:
if num_cols > 0:
for number, element in enumerate(args):
if number == (num_cols - 1):
string_of_rows = string_of_rows + element + '\n'
else:
string_of_rows = string_of_rows + element + ','
except UnboundLocalError:
print('Empty file \'or\' No arguments received, cannot be zero')
return string_of_rows
def open_file(file_name):
try:
with open(file_name) as csv_file_in, open('HDT_data5.txt', 'w') as csv_file_out:
csv_read = csv.reader(csv_file_in, delimiter='\t')
for row in csv_read:
try:
row[0] = row[0].replace(',', '')
csv_file_out.write(get_columns_of_data(*row))
except TypeError:
continue
print("The file name '{}' was successfully opened and read".format(file_name))
except IOError:
print('File not found \'OR\' Not in current directory\n')
# All acronyms used in variable naming correspond to the function at time
# of return from function.
# csv_list being a list of the csv file contents the remainder i.e. 'st' of
# csv_list_st = split_title().
def main():
open_file('HDTdata3.txt')
multi_sets = open_as_dataframe('HDT_data5.txt')
# change_column_names(multi_sets)
change_column_names(multi_set, 'Old_Name', 'New_Name')
print(multi_sets)
main()
I cleaned up your code so it would run. You were changing the column names but not returning the result. Try the following:
import pandas as pd
import numpy as np
import math
def set_new_columns(as_pandas):
titles_list = ['Year > 2014', 'Forum', 'Blog', 'Python', 'R',
'Machine_Learning', 'Data_Science', 'Data',
'Analytics']
for number, word in enumerate(titles_list):
as_pandas.insert(len(as_pandas.columns), titles_list[number], 0)
def title_length(as_pandas):
# Insert new column header then count the number of letters in 'Title'
as_pandas.insert(len(as_pandas.columns), 'Title_Length', 0)
as_pandas['Title_Length'] = as_pandas['Title'].map(str).apply(len)
# Although it is a log, the difference logX1 - logX2 is an (inverse) linear
# comparison, so you can think of it as the percentage change in Page Views.
# map allows a function to be applied to every row in the 'Page_Views' column.
def log_page_view(as_pandas):
# Insert new column header
as_pandas.insert(len(as_pandas.columns), 'Log_Page_Views', 0)
as_pandas['Log_Page_Views'] = as_pandas['Page_Views'].map(lambda x: math.log(1 + float(x)))
def change_to_numeric(as_pandas):
# Replace blank strings with NaN, then convert the column to numeric in place on the caller's frame.
as_pandas['Page_Views'] = as_pandas['Page_Views'].replace(r'^\s*$', np.nan, regex=True)
as_pandas['Page_Views'] = pd.to_numeric(as_pandas['Page_Views'], errors='coerce')
def change_column_names(as_pandas):
as_pandas.rename(columns={'Unique Pageviews': 'Page_Views'}, inplace=True)
return as_pandas
def open_as_dataframe(file_name_in):
reader = pd.read_csv(file_name_in, encoding='windows-1251')
return reader
# Get each column of data including the heading and separate each element
# i.e. Title, URL, Date, Page Views
# and save to string_of_rows with comma separator for storage as a csv
# file.
def get_columns_of_data(*args):
# Function that accept variable length arguments
string_of_rows = str()
num_cols = len(args)
try:
if num_cols > 0:
for number, element in enumerate(args):
if number == (num_cols - 1):
string_of_rows = string_of_rows + element + '\n'
else:
string_of_rows = string_of_rows + element + ','
except UnboundLocalError:
print('Empty file \'or\' No arguments received, cannot be zero')
return string_of_rows
def open_file(file_name):
import csv
try:
with open(file_name) as csv_file_in, open('HDT_data5.txt', 'w') as csv_file_out:
csv_read = csv.reader(csv_file_in, delimiter='\t')
for row in csv_read:
try:
row[0] = row[0].replace(',', '')
csv_file_out.write(get_columns_of_data(*row))
except TypeError:
continue
print("The file name '{}' was successfully opened and read".format(file_name))
except IOError:
print('File not found \'OR\' Not in current directory\n')
# All acronyms used in variable naming correspond to the function at time
# of return from function.
# csv_list being a list of the csv file contents the remainder i.e. 'st' of
# csv_list_st = split_title().
def main():
open_file('HDTdata3.txt')
multi_sets = open_as_dataframe('HDT_data5.txt')
multi_sets = change_column_names(multi_sets)
change_to_numeric(multi_sets)
log_page_view(multi_sets)
title_length(multi_sets)
set_new_columns(multi_sets)
print(multi_sets)
main()
I'm currently writing a class called SMS_store(). In it, I have a method called delete.
Delete is simply supposed to make sure the user has given me a valid integer. If so, it's supposed to pop an item from the list.
class SMS_store():
def __init__(self):
self.__inbox = []
def delete(self, i):
if i >= len(self.__inbox):
return None
else:
self.__inbox.pop(i)
Whenever I run the code in my test program, I run into two errors at my delete stage:
1) If I type myInbox.delete(2) when there are only 2 items in the list, I get "list index out of range", and I thought I was protected from that error. myInbox.delete(3) gives me None.
2) If I type myInbox.delete(1) when there is a valid index 1 in my list, it says global name 'msg' is not defined. I don't get why I'm seeing that error.
Here's my full class code.
#SMS_store class
"""
Pre-condition: SMS_store class is instantiated in client code.
Post-condition: SMS_store class is instantiated.
"""
class SMS_store():
#Object instantiation
"""
Pre-condition: SMS_store class is instantiated in client code.
Post-condition: Object creates an empty list.
"""
def __init__(self):
self.__inbox = []
#add_new_arrival method
"""
Pre-condition: Class method is handed a valid phone number of 11, 10, or 7
digits as a string with no hyphens or letters, a string containing a time,
and a string containing the text of a message.
Post-condition: Method will append a tuple containing False for an
unread message, the phone number, the time arrived and the text of the
message to the class created list.
"""
def add_new_arrival(self, from_number, time_arrived, text_of_SMS):
number = from_number
#Check for valid phone number and add hyphens based on number length
if len(number) == 11:
number = number[0] + "-" + number[1:4] + "-" + number[4:7] + "-"\
+ number[7:]
elif len(number) == 7:
number = number[:3] + "-" + number[3:]
elif len(number) == 10:
number = "1-" + number[:3] + "-" + number[3:6] + "-" + number[6:]
elif number.isalpha():
number = "Invalid number"
else:
number = "Invalid number"
time = time_arrived
text = text_of_SMS
message = (False, number, time, text)
self.__inbox.append(message)
#message_count method
"""
Post-condition: method returns the number of tuples in class created list.
Returns None if list is empty.
"""
def message_count(self):
count = len(self.__inbox)
if count == 0:
return None
else:
return count
#get_unread_indexes method
"""
Post-condition: method creates an empty list,checks for any tuples with
"False" at index 0. If "False" is found, it appends the index for the
tuple in the list. Method returns list of indexes.
"""
def get_unread_indexes(self):
unread = []
for message in self.__inbox:
if message[0] == False:
unread.append(self.__inbox.index(message))
return unread
#get_message method
"""
Pre-condition: Method is passed an integer.
Post-condition: Method checks for a valid index number. If valid, the
method will then check if indexed tuple contains "True" or "False" at index
0. If True, message is returned in new tuple containing items from indexes
1, 2, and 3. If False, a new tuple is created containing "True"
indicating the message is now read, plus indexes 1, 2, and 3 from the
original called tuple.
"""
def get_message(self, i):
#check for valid index number
if i >= len(self.__inbox):
return None
else:
msg = self.__inbox[i]
if msg[0] == True:
return (msg[1], msg[2], msg[3])
#create new tuple with True, and index 1-3 from original tuple
else:
self.__inbox.pop(i)
newMsg = (True, msg[1], msg[2], msg[3])
self.__inbox.insert(i, newMsg)
return newMsg[1:]
#delete method
"""
Pre-condition: Method is passed an integer.
Post-condition: Method checks that the integer is a valid index number. If
valid, method pops index from class created list.
"""
def delete(self, i):
if i >= len(self.__inbox):
return None
else:
self.__inbox.pop(i)
#Clear method
"""
Post-condition: method resets the inbox to an empty list.
"""
def clear(self):
self.__inbox = []
Here's how I am using the code in my test program:
#Test instantiation
naomisInbox = SMS_store()
martisInbox = SMS_store()
#Test add_new_arrival
naomisInbox.add_new_arrival("12345678912", "10:38PM", "Yay! Sorry, been")
martisInbox.add_new_arrival("23456789123", "10:37PM", "Hey I finally hit 90")
martisInbox.add_new_arrival("12345678912", "10:40PM", "Now I sleep :)")
naomisInbox.add_new_arrival("23456789123", "10:40PM", "Night")
#Test message_count
count = naomisInbox.message_count()
print("Naomi has", count, "messages in her inbox.")
count = martisInbox.message_count()
print("Marti has", count, "messages in his inbox.\n")
#Test get_unread_indexes
numUnread = naomisInbox.get_unread_indexes()
print("Naomi has unread messages at indexes: ", numUnread)
numUnread = martisInbox.get_unread_indexes()
print("Marti has unread messages at indexes: ", numUnread,"\n")
#Test get_message
msg = naomisInbox.get_message(9)
print("Getting message from Naomi's inbox at index [9]: ")
if msg == None:
print("No message at that index.")
else:
for item in msg:
print(item)
print("\n")
numUnread = naomisInbox.get_unread_indexes()
print("Naomi now has unread messages at indexes: ", numUnread, "\n")
msg = martisInbox.get_message(1)
print("Getting message from Marti's inbox at index [1]:")
for item in msg:
print(item)
print("\n")
numUnread = martisInbox.get_unread_indexes()
print("Marti now has unread messages at indexes: ", numUnread, "\n")
#Test delete
remove = naomisInbox.delete(0)
if remove == None:
print("Invalid index.")
count = naomisInbox.message_count()
numUnread = naomisInbox.get_unread_indexes()
print("Naomi now has", count, "messages with unread messages at index: ",\
numUnread)
#Test clear
print("\nAfter clearing: ")
naomisInbox.clear()
count = naomisInbox.message_count()
print("Naomi now has", count, "messages in her inbox.")
martisInbox.clear()
count = martisInbox.message_count()
print("Marti now has", count, "messages in his inbox.")
Error:
Traceback (most recent call last):
File "/home/theriddler/Documents/CSIS153/Assignments/Nansen3/Nansen3.py", line 56, in <module>
remove = naomisInbox.delete(0)
File "/home/theriddler/Documents/CSIS153/Assignments/Nansen3/modSMS.py", line 125, in delete
NameError: global name 'msg' is not defined
Any help is appreciated. Sorry if it's a repeated question. Thanks, Blackwell.
For your first problem:
1) If there are only two items in the list, then you cannot delete the 2nd item by passing 2 as the index; it should be 1.
2) Your second problem suggests that you are using the same msg variable in different functions of the SMS_store class without defining it as an attribute on self. However, I can't find anything like that for now. You should probably check it again, as it works well on my machine.
Now a little more light on your delete method:
def delete(self, i):
if i >= len(self.__inbox):
return None
else:
self.__inbox.pop(i)
Here, if you always want to delete the last message, just use self.__inbox.pop() without passing any index. But if you want to delete the message at a given position, you should do self.__inbox.pop(i - 1), because when i is the position of the last element it will be equal to the length of the list and the else branch will never be executed.
Also, your delete method only returns None explicitly in the if branch, but when the else branch runs, None is returned by default as well, so
remove = naomisInbox.delete(0)
if remove == None:
print("Invalid index.")
will always print "Invalid index." even if the message gets deleted.
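One way to make that check meaningful is to return something other than None on success. A small sketch that keeps the 0-based index the rest of the class already uses (just one option, not necessarily what the assignment expects):
def delete(self, i):
    """ removes the message at 0-based index i; returns True on success, None for an invalid index """
    if 0 <= i < len(self.__inbox):
        self.__inbox.pop(i)
        return True
    return None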