I'm using Python 3.9 and Django 3.2. I have a Django model with a couple of many-to-many relationships:
class Coop(models.Model):
    objects = CoopManager()
    name = models.CharField(max_length=250, null=False)
    types = models.ManyToManyField(CoopType, blank=False)
    addresses = models.ManyToManyField(Address, through='CoopAddressTags')
    enabled = models.BooleanField(default=True, null=False)
    phone = models.ForeignKey(ContactMethod, on_delete=models.CASCADE, null=True, related_name='contact_phone')
    email = models.ForeignKey(ContactMethod, on_delete=models.CASCADE, null=True, related_name='contact_email')
    web_site = models.TextField()
    description = models.TextField(null=True)
    approved = models.BooleanField(default=False, null=True)
    proposed_changes = models.JSONField("Proposed Changes", null=True)
    reject_reason = models.TextField(null=True)
I can search for my model using a manager class that builds a query like so ...
def find(
    self,
    partial_name,
    types_arr=None,
    enabled=None,
    city=None,
    zip=None,
    street=None,
    state_abbrev=None
):
    """
    Look up coops by varying criteria.
    """
    q = Q()
    if partial_name:
        q &= Q(name__icontains=partial_name)
    if enabled is not None:
        q &= Q(enabled=enabled)
    if types_arr is not None:
        types_filter = Q(
            *[('types__name', type) for type in types_arr],
            _connector=Q.OR
        )
        q &= types_filter
    if street is not None:
        q &= Q(addresses__raw__icontains=street)
    if city is not None:
        q &= Q(addresses__locality__name__iexact=city)
    if zip is not None:
        q &= Q(addresses__locality__postal_code=zip)
    if state_abbrev is not None:
        q &= Q(addresses__locality__state__code=state_abbrev)
        q &= Q(addresses__locality__state__country__code="US")
    queryset = Coop.objects.filter(q)
    print(queryset.query)
    return queryset
In my view, I invoke and return the data using
coops = Coop.objects.find(...)
serializer = CoopSearchSerializer(coops, many=True)
where the serializer looks like
class CoopSearchSerializer(serializers.ModelSerializer):
    ...
    def to_representation(self, instance):
        rep = super().to_representation(instance)
        rep['coopaddresstags_set'] = CoopAddressTagsSerializer(instance.coopaddresstags_set.all(), many=True).data
        return rep
What I'm noticing is that when a search returns 6 results and each one is serialized using the logic above, a separate set of queries runs for every result ...
SELECT "directory_coopaddresstags"."id", "directory_coopaddresstags"."coop_id", "directory_coopaddresstags"."address_id", "directory_coopaddresstags"."is_public" FROM "directory_coopaddresstags" WHERE "directory_coopaddresstags"."coop_id" = 271; args=(271,)
(0.000) SELECT "directory_coopaddresstags"."id", "directory_coopaddresstags"."coop_id", "directory_coopaddresstags"."address_id", "directory_coopaddresstags"."is_public" FROM "directory_coopaddresstags" WHERE "directory_coopaddresstags"."coop_id" = 271; args=(271,)
type of instance: <class 'directory.models.CoopAddressTags'>
(0.000) SELECT "address_address"."id", "address_address"."street_number", "address_address"."route", "address_address"."locality_id", "address_address"."raw", "address_address"."formatted", "address_address"."latitude", "address_address"."longitude" FROM "address_address" WHERE "address_address"."id" = 263 LIMIT 21; args=(263,)
(0.000) SELECT "address_locality"."id", "address_locality"."name", "address_locality"."postal_code", "address_locality"."state_id" FROM "address_locality" WHERE "address_locality"."id" = 16 LIMIT 21; args=(16,)
(0.000) SELECT "address_state"."id", "address_state"."name", "address_state"."code", "address_state"."country_id" FROM "address_state" WHERE "address_state"."id" = 19313 LIMIT 21; args=(19313,)
(0.000) SELECT "address_country"."id", "address_country"."name", "address_country"."code" FROM "address_country" WHERE "address_country"."id" = 484 LIMIT 21; args=(484,)
So if there are 6 results, the above gets run 6 times with different IDs. Is there a way to have only a single set of queries run, so that regardless of whether there are 1, 10, or 100 results, the same number of queries gets run to return the data?
I see: you collect your data from many M2M and FK relations.
I agree with @Amres that you can use prefetch_related and select_related in your queries, but simply calling those methods will not be enough.
First:
With prefetch_related and select_related you can get down to 3 queries per request, but no lower: 1 big query plus 2 for the M2M relations.
More here:
https://docs.djangoproject.com/en/4.1/ref/models/querysets/#select-related
https://docs.djangoproject.com/en/4.1/ref/models/querysets/#prefetch-related
Second:
You cannot simply use:
.prefetch_related('addresses')
On its own it will not solve the problem; in your case you need to use a Prefetch object.
More here:
https://docs.djangoproject.com/en/4.1/ref/models/querysets/#django.db.models.Prefetch
Why? The answer is that you have additional FK relations behind the Address model:
addresses__raw,
addresses__locality,
addresses__locality__state,
addresses__locality__state__country
Here is how it can be declared (note that raw is a plain text field, so only the real FK chain goes into select_related):
addressPrefetcher = Prefetch('addresses', queryset=Address.objects.select_related('locality', 'locality__state', 'locality__state__country'))
After that you can use this construction:
coops = Coop.objects.find(your_query).select_related('phone', 'email').prefetch_related(addressPrefetcher, 'types')
If I understand your models correctly, you get only 3 hits on the DB, no matter how many objects pass through CoopSearchSerializer. You could get it down to 1 hit with GroupConcat, but for this business case that would be overkill.
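Worth adding: the serializer in the question reads instance.coopaddresstags_set directly, and the addresses prefetch does not populate that reverse relation. A hedged sketch that also prefetches the through model (CoopAddressTags and its address FK are inferred from the question's SQL log, so treat them as assumptions):
from django.db.models import Prefetch

# Hedged sketch: cover the through-model access in to_representation().
# 'coopaddresstags_set' is the default reverse name for CoopAddressTags.
tags_prefetcher = Prefetch(
    'coopaddresstags_set',
    queryset=CoopAddressTags.objects.select_related(
        'address__locality__state__country'  # joins address, locality, state and country in one query
    ),
)

coops = (
    Coop.objects.find(your_query)
    .select_related('phone', 'email')
    .prefetch_related(addressPrefetcher, tags_prefetcher, 'types')
)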
P.S. Keeping phone and email as FKs is probably not a good idea, but it is possible.
I'm stuck on a pretty simple issue with peewee-async regarding JOINs, or perhaps I need to use a subquery, or prefetch... I can't figure out what kind of query I need.
I have 2 database tables (parent/child):
class Group(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()

class Channel(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()
    group = peewee.ForeignKeyField(Group, backref="channels")
I need to fetch 1 group object, and this object has multiple channel objects.
I tried:
q = Group.select(Group, Channel).join(Channel)
But my backref 'channels' is always a ModelQuery instance, not the actual resultset.
Full code
import asyncio

import peewee
import peewee_async
from peewee_async import Manager, PooledPostgresqlDatabase

database = PooledPostgresqlDatabase('test', max_connections=4, user='postgres', password='', host='127.0.0.1')
objects = peewee_async.Manager(database)

class PeeweeModel(peewee.Model):
    class Meta:
        database = database

class Group(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()

class Channel(PeeweeModel):
    id = peewee.AutoField()
    name = peewee.TextField()
    group = peewee.ForeignKeyField(Group, backref="channels")

Group.create_table()
Channel.create_table()
database.set_allow_sync(False)

async def handler():
    # create 1 group object
    group = await objects.create(Group, name="TestGroup")
    # create 2 channel objects, assign to group
    await objects.create(Channel, name="TestName1", group=group)
    await objects.create(Channel, name="TestName2", group=group)
    # Query 1 group, and hopefully it will have the channels
    q = Group.select(Group, Channel).join(Channel)
    results = await objects.execute(q)
    for result in results:
        print(result.channels)  # problem: channels is not a list of channel objects, but a `ModelSelect` instead
    with objects.allow_sync():
        Channel.drop_table(True)
        Group.drop_table(True)

loop = asyncio.get_event_loop()
loop.run_until_complete(handler())
loop.close()
I was able to get help from an expert™ and the solution is to use prefetch():
async def handler():
    # create 1 group object
    group = await objects.create(Group, name="TestGroup")
    # create 2 channel objects, assign to group
    await objects.create(Channel, name="TestName", group=group)
    await objects.create(Channel, name="TestName", group=group)
    # Query 1 group, and hopefully it will have the channels
    q = Group.select(Group)
    groups = await objects.prefetch(q, Channel.select(Channel))
    for group in groups:
        print(group, group.channels)  # channels is a list of channels.
    with objects.allow_sync():
        Channel.drop_table(True)
        Group.drop_table(True)
Peewee will figure out the relationship (backref) by itself.
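For what it's worth, prefetch() issues one query per model you pass in, so this stays at two queries however many groups come back. The sub-query is an ordinary select and can be narrowed; a small sketch with a hypothetical name filter:
async def handler_filtered():
    # Hypothetical filter: only prefetch channels whose name contains "Test".
    # peewee still attaches each prefetched channel to its parent group
    # through the 'channels' backref.
    q = Group.select(Group)
    groups = await objects.prefetch(q, Channel.select().where(Channel.name.contains("Test")))
    for group in groups:
        print(group.name, [channel.name for channel in group.channels])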
I know we can create an automated action using cron in Odoo,
but I want something different.
In Odoo's mass mailing I want to add a repetition option for mail mass mailings.
For example, in the form view_mail_mass_mailing_form > Options page,
I added a repetition selection field.
I added this because I want each mass mailing to be handled on its own schedule.
class MailMassMailing(models.Model):
    _inherit = 'mail.mass_mailing'

    recurrence_mail = fields.Selection([
        ('daily', 'Day'),
        ('weekly', 'Weeks'),
        ('monthly', 'Months'),
    ], string='Recurring')
I want this mass mailing to be sent every day, week, or month:
how do I call a function with a date interval,
i.e. how do I call a function every day, week, or month?
The sending of this mass mailing should repeat, counting from the date of creation.
Just extend the Mass Mailing model with a new date field and implement a model method for a daily-running ir.cron to call.
from odoo import api, fields, models

class MailMassMailing(models.Model):
    _inherit = 'mail.mass_mailing'

    recurrence_mail = fields.Selection([
        ('daily', 'Day'),
        ('weekly', 'Weeks'),
        ('monthly', 'Months'),
    ], string='Recurring')
    last_sent_on = fields.Date()

    @api.model
    def run_send_recurring(self):
        """ Resend mass mailing with recurring interval """
        domain = [('recurrence_mail', '!=', False)]
        # TODO monthly should be solved in another way, but that
        # is not needed for this example
        deltas = {'daily': 1, 'weekly': 7, 'monthly': 30}
        today = fields.Date.from_string(fields.Date.today())
        for mass_mail in self.search(domain):
            # never sent? go send it
            if not mass_mail.last_sent_on:
                # send the way you want
                continue
            # otherwise get the delta between today and last_sent_on
            last_dt = fields.Date.from_string(mass_mail.last_sent_on)
            if (today - last_dt).days >= deltas[mass_mail.recurrence_mail]:
                # send the way you want
                pass
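The answer mentions a daily-running ir.cron but does not show one. As a hedged sketch, here is how it could be registered from Python (for example in an Odoo shell); the usual route is an XML data record, and the model/function field names assume the older ir.cron schema from the mail.mass_mailing era, so treat them as assumptions:
# Hedged sketch: a cron that calls run_send_recurring() once a day.
# Field names assume the older ir.cron schema (model/function columns).
env['ir.cron'].create({
    'name': 'Mass mailing: send recurring',
    'model': 'mail.mass_mailing',      # model exposing the method
    'function': 'run_send_recurring',  # method the scheduler calls
    'interval_number': 1,
    'interval_type': 'days',
    'numbercall': -1,                  # repeat indefinitely
})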
Thank you @CZoellner for your help.
I found the solution with your idea:
# Solution ############### .py
import logging
from datetime import datetime

from odoo import api, fields, models

logger = logging.getLogger(__name__)

class MailMassMailing(models.Model):
    _inherit = 'mail.mass_mailing'

    @api.model
    def run_send_recurring(self):
        """ Resend mass mailing with recurring interval """
        date_format = '%Y-%m-%d'
        domain = [('recurrence_mail', '!=', False), ('state', '=', 'done')]
        deltas = {'daily': 1, 'weekly': 7, 'monthly': 30}
        logger.info("deltas: %s", deltas)
        today = fields.Date.today()
        logger.info("today: %s", today)
        for mass_mail in self.search(domain):
            logger.info("mass_mail: %s", mass_mail)
            # never sent? queue it right away
            if not mass_mail.last_sent_on:
                mass_mail.put_in_queue()
                continue
            joining_date = mass_mail.last_sent_on
            current_date = datetime.today().strftime(date_format)
            d1 = datetime.strptime(joining_date, date_format).date()
            d2 = datetime.strptime(current_date, date_format).date()
            # days elapsed since the last send
            elapsed_days = (d2 - d1).days
            logger.info("elapsed days: %s", elapsed_days)
            if elapsed_days >= deltas[mass_mail.recurrence_mail]:
                mass_mail.put_in_queue()
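One gap worth flagging: nothing in the snippet ever writes last_sent_on, so a mailing stays eligible and gets re-queued on the next run even when it was just sent. A hedged addition, reusing the field from the answer above, is to stamp the date at the point where the mailing is queued:
            # Hedged addition: record when the mailing was queued so the
            # elapsed-days check has a fresh baseline on the next cron run.
            mass_mail.put_in_queue()
            mass_mail.last_sent_on = fields.Date.today()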
I have an application which produces reports in Excel (.XLS) format. I need to append the data from these reports to an existing table in a MS Access 2010 database. A typical record is:
INC000000004154 Closed Cbeebies BBC Childrens HQ6 monitor wall dropping out. HQ6 P3 3/7/2013 7:03:01 PM 3/7/2013 7:03:01 PM 3/7/2013 7:14:15 PM The root cause of the problem was the power supply to the PC which was feeding the monitor. HQ6 Monitor wall dropping out. BBC Third Party Contractor supply this equipment.
The complication is that I need to do some limited processing on the data.
Specifically, I need to do a couple of lookups converting names to numbers, and parse a date string (for some reason the report puts dates into the spreadsheet as text rather than in date format).
Now I could do this in Python using XLRD/XLWT, but would much prefer to do it in Excel or Access. Does anyone have any advice on a good way to approach this? I would very much prefer NOT to use VBA, so could I do something like record an MS Excel macro and then execute that macro on the newly created XLS file?
You can directly import some Excel data into MS Access, but since your requirement is to do some processing, I don't see how you will be able to achieve that without:
an ETL application, like Pentaho or Talend or others.
That would certainly be like using a hammer to crush an ant, though.
some other external data processing pipeline, in Python or some other programming language.
VBA (whether through macros or hand-coded).
VBA has been really good at that sort of thing in Access for literally decades.
Since you are using Excel and Access, staying within that realm looks like the best solution for solving your issue.
Just use queries:
Import the data without transformation into a table whose sole purpose is to accommodate the data from Excel; then create queries from that raw data to add the missing information and massage the data before appending the result to your final destination table.
That solution has the advantage of letting you create simple steps in Access that you can easily record using macros.
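If the massaging ever outgrows what a saved query can comfortably express, the same staging-then-append idea is only a few lines of Python with pyodbc; a hedged sketch, in which RawImport, Incidents, Sites and every column name are hypothetical placeholders for your own schema:
import pyodbc

# Hedged sketch: append from a staging table, converting the text date with
# Access's CDate() and looking up a name against a Sites table on the way.
# RawImport, Incidents, Sites and all column names are placeholders.
cxn = pyodbc.connect(r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\data\reports.accdb;")
cur = cxn.cursor()
cur.execute("""
    INSERT INTO Incidents (Ref, OpenedOn, SiteID)
    SELECT RawImport.Ref, CDate(RawImport.OpenedText), Sites.ID
    FROM RawImport INNER JOIN Sites ON RawImport.SiteName = Sites.SiteName
""")
cxn.commit()
cxn.close()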
I asked this question some time ago and decided it would be easier to do it in Python. Gord asked me to share, and here it is (sorry about the delay, other projects took priority for a while).
"""
Routine to migrate the S7 data from MySQL to the new Access
database.
We're using the pyodbc libraries to connect to Microsoft Access
Note that there are 32- and 64-bit versions of these libraries
available but in order to work the word-length for pyodbc and by
implication Python and all its associated compiled libraries must
match that of MS Access. Which is an arse as I've just had to
delete my 64-bit installation of Python and replace it and all
the libraries with the 32-bit version.
Tim Greening-Jackson 08 May 2013 (timATgreening-jackson.com)
"""
import pyodbc
import re
import datetime
import tkFileDialog
from Tkinter import *
class S7Incident:
    """
    Class containing the records downloaded from the S7.INCIDENTS table
    """
    def __init__(self, id_incident, priority, begin, acknowledge,
                 diagnose, workaround, fix, handoff, lro, nlro,
                 facility, ctas, summary, raised, code):
        self.id_incident = unicode(id_incident)
        self.priority = {u'P1': 1, u'P2': 2, u'P3': 3, u'P4': 4, u'P5': 5}[unicode(priority.upper())]
        self.begin = begin
        self.acknowledge = acknowledge
        self.diagnose = diagnose
        self.workaround = workaround
        self.fix = fix
        self.handoff = True if handoff else False
        self.lro = True if lro else False
        self.nlro = True if nlro else False
        self.facility = unicode(facility)
        self.ctas = ctas
        self.summary = "** NONE ***" if summary is None else summary.replace("'", "")
        self.raised = raised.replace("'", "")
        self.code = 0 if code is None else code
        self.production = None
        self.dbid = None

    def __repr__(self):
        return "[{}] ID:{} P{} Prod:{} Begin:{} A:{} D:+{}s W:+{}s F:+{}s\nH/O:{} LRO:{} NLRO:{} Facility={} CTAS={}\nSummary:'{}',Raised:'{}',Code:{}".format(
            self.id_incident, self.dbid, self.priority, self.production, self.begin,
            self.acknowledge, self.diagnose, self.workaround, self.fix,
            self.handoff, self.lro, self.nlro, self.facility, self.ctas,
            self.summary, self.raised, self.code)
    def ProcessIncident(self, cursor, facilities, productions):
        """
        Produces the SQL necessary to insert the incident into the Access
        database, executes it and then gets the autonumber ID (dbid) of the
        newly created incident (this is used so LRO, NLRO, CTAS and AD1 can
        refer to their parent incident). If the incident is classed as LRO,
        NLRO or CTAS then the appropriate record is created. Returns the dbid.
        """
        if self.raised.upper() in productions:
            self.production = productions[self.raised.upper()]
        else:
            self.production = 0
        sql = """INSERT INTO INCIDENTS (ID_INCIDENT, PRIORITY, FACILITY, BEGIN,
        ACKNOWLEDGE, DIAGNOSE, WORKAROUND, FIX, HANDOFF, SUMMARY, RAISED, CODE, PRODUCTION)
        VALUES ('{}', {}, {}, #{}#, {}, {}, {}, {}, {}, '{}', '{}', {}, {})
        """.format(self.id_incident, self.priority, facilities[self.facility], self.begin,
                   self.acknowledge, self.diagnose, self.workaround, self.fix,
                   self.handoff, self.summary, self.raised, self.code, self.production)
        cursor.execute(sql)
        cursor.execute("SELECT @@IDENTITY")
        self.dbid = cursor.fetchone()[0]
        if self.lro:
            self.ProcessLRO(cursor, facilities[self.facility])
        if self.nlro:
            self.ProcessNLRO(cursor, facilities[self.facility])
        if self.ctas:
            self.ProcessCTAS(cursor, facilities[self.facility], self.ctas)
        return self.dbid

    def ProcessLRO(self, cursor, facility):
        sql = "INSERT INTO LRO (PID, DURATION, FACILITY) VALUES ({}, {}, {})"\
            .format(self.dbid, self.workaround, facility)
        cursor.execute(sql)

    def ProcessNLRO(self, cursor, facility):
        sql = "INSERT INTO NLRO (PID, DURATION, FACILITY) VALUES ({}, {}, {})"\
            .format(self.dbid, self.workaround, facility)
        cursor.execute(sql)

    def ProcessCTAS(self, cursor, facility, code):
        sql = "INSERT INTO CTAS (PID, DURATION, FACILITY, CODE) VALUES ({}, {}, {}, {})"\
            .format(self.dbid, self.workaround, facility, self.ctas)
        cursor.execute(sql)
class S7AD1:
    """
    S7.AD1 records.
    """
    def __init__(self, id_ad1, date, ref, commentary, adjustment):
        self.id_ad1 = id_ad1
        self.date = date
        self.ref = unicode(ref)
        self.commentary = unicode(commentary)
        self.adjustment = float(adjustment)
        self.pid = 0
        self.production = 0

    def __repr__(self):
        return "[{}] Date:{} Parent:{} PID:{} Amount:{} Commentary: {} "\
            .format(self.id_ad1, self.date.strftime("%d/%m/%y"), self.ref, self.pid, self.adjustment, self.commentary)

    def SetPID(self, pid):
        self.pid = pid

    def SetProduction(self, p):
        self.production = p

    def Process(self, cursor):
        sql = "INSERT INTO AD1 (pid, begin, commentary, production, adjustment) VALUES ({}, #{}#, '{}', {}, {})"\
            .format(self.pid, self.date.strftime("%d/%m/%y"), self.commentary, self.production, self.adjustment)
        cursor.execute(sql)
class S7Financial:
    """
    S7 monthly financial summary of income and penalties from the S7.FINANCIALS table.
    These are identical in the new database.
    """
    def __init__(self, month, year, gco, cta, support, sc1, sc2, sc3, ad1):
        self.begin = datetime.date(year, month, 1)
        self.gco = float(gco)
        self.cta = float(cta)
        self.support = float(support)
        self.sc1 = float(sc1)
        self.sc2 = float(sc2)
        self.sc3 = float(sc3)
        self.ad1 = float(ad1)

    def __repr__(self):
        return "Period: {} GCO:{:.2f} CTA:{:.2f} SUP:{:.2f} SC1:{:.2f} SC2:{:.2f} SC3:{:.2f} AD1:{:.2f}"\
            .format(self.begin.strftime("%m/%y"), self.gco, self.cta, self.support, self.sc1, self.sc2, self.sc3, self.ad1)

    def Process(self, cursor):
        """
        Insert into the FINANCIALS table
        """
        sql = "INSERT INTO FINANCIALS (BEGIN, GCO, CTA, SUPPORT, SC1, SC2, SC3, AD1) VALUES (#{}#, {}, {}, {}, {}, {}, {}, {})"\
            .format(self.begin, self.gco, self.cta, self.support, self.sc1, self.sc2, self.sc3, self.ad1)
        cursor.execute(sql)
class S7SC3:
    """
    Miscellaneous S7 SC3 stuff. The new table is identical to the old one.
    """
    def __init__(self, begin, month, year, p1ot, p2ot, totchg, succchg, chgwithinc, fldchg, egychg):
        self.begin = begin
        self.p1ot = p1ot
        self.p2ot = p2ot
        self.changes = totchg
        self.successful = succchg
        self.incidents = chgwithinc
        self.failed = fldchg
        self.emergency = egychg

    def __repr__(self):
        return "{} P1:{} P2:{} CHG:{} SUC:{} INC:{} FLD:{} EGY:{}"\
            .format(self.begin.strftime("%m/%y"), self.p1ot, self.p2ot, self.changes, self.successful, self.incidents, self.failed, self.emergency)

    def Process(self, cursor):
        """
        Inserts a record into the Access database
        """
        sql = "INSERT INTO SC3 (BEGIN, P1OT, P2OT, CHANGES, SUCCESSFUL, INCIDENTS, FAILED, EMERGENCY) VALUES\
        (#{}#, {}, {}, {}, {}, {}, {}, {})"\
            .format(self.begin, self.p1ot, self.p2ot, self.changes, self.successful, self.incidents, self.failed, self.emergency)
        cursor.execute(sql)
def ConnectToAccessFile():
    """
    Prompts the user for an Access database file, connects, creates a cursor,
    cleans out the tables which are to be replaced, and gets a hash of the
    facilities table keyed on facility name, returning facility id.
    """
    # Prompt the user to select which Access DB file he wants to use and then attempt to connect
    root = Tk()
    dbname = tkFileDialog.askopenfilename(parent=root, title="Select output database", filetypes=[('Access 2010', '*.accdb')])
    root.destroy()
    # Connect to the Access (new) database and clean its existing incidents etc. tables out as
    # these will be replaced with the new data
    dbcxn = pyodbc.connect("Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=" + dbname + ";")
    dbcursor = dbcxn.cursor()
    print("Connected to {}".format(dbname))
    for table in ["INCIDENTS", "AD1", "LRO", "NLRO", "CTAS", "SC3", "PRODUCTIONS", "FINANCIALS"]:
        print("Clearing table {}...".format(table))
        dbcursor.execute("DELETE * FROM {}".format(table))
    # Get the list of facilities from the Access database...
    dbcursor.execute("SELECT id, facility FROM facilities")
    rows = dbcursor.fetchall()
    dbfacilities = {unicode(row[1]): row[0] for row in rows}
    return dbcxn, dbcursor, dbfacilities
# Entry point
incre = re.compile("INC\d{12}[A-Z]?")  # Regex that matches incident references
try:
    dbcxn, dbcursor, dbfacilities = ConnectToAccessFile()
    # Connect to the MySQL S7 (old) database and read the incidents and ad1 tables
    s7cxn = pyodbc.connect("DRIVER={MySQL ODBC 3.51 Driver}; SERVER=localhost;DATABASE=s7; UID=root; PASSWORD=********; OPTION=3")
    print("Connected to MySQL S7 database")
    s7cursor = s7cxn.cursor()
    s7cursor.execute("""
        SELECT id_incident, priority, begin, acknowledge,
               diagnose, workaround, fix, handoff, lro, nlro,
               facility, ctas, summary, raised, code FROM INCIDENTS""")
    rows = s7cursor.fetchall()
    # Discard any incidents which don't have a reference of the form INC... as they are ancient
    print("Fetching incidents")
    s7incidents = {unicode(row[0]): S7Incident(*row) for row in rows if incre.match(row[0])}
    # Get the list of productions from the S7 database to replace the one we've just deleted ...
    print("Fetching productions")
    s7cursor.execute("SELECT DISTINCT RAISED FROM INCIDENTS")
    rows = s7cursor.fetchall()
    s7productions = [r[0] for r in rows]
    # ... now get the AD1s ...
    print("Fetching AD1s")
    s7cursor.execute("SELECT id_ad1, date, ref, commentary, adjustment from AD1")
    rows = s7cursor.fetchall()
    s7ad1s = [S7AD1(*row) for row in rows]
    # ... and the financial records ...
    print("Fetching Financials")
    s7cursor.execute("SELECT month, year, gco, cta, support, sc1, sc2, sc3, ad1 FROM Financials")
    rows = s7cursor.fetchall()
    s7financials = [S7Financial(*row) for row in rows]
    print("Writing financials ({})".format(len(s7financials)))
    [p.Process(dbcursor) for p in s7financials]
    # ... and the SC3s.
    print("Fetching SC3s")
    s7cursor.execute("SELECT begin, month, year, p1ot, p2ot, totchg, succhg, chgwithinc, fldchg, egcychg from SC3")
    rows = s7cursor.fetchall()
    s7sc3s = [S7SC3(*row) for row in rows]
    print("Writing SC3s ({})".format(len(s7sc3s)))
    [p.Process(dbcursor) for p in s7sc3s]
    # Re-create the productions table in the new database. Note we refer to production
    # by number in the incidents table so need to do the SELECT @@IDENTITY to give us the
    # autonumber index. To make sure everything is case-insensitive convert the
    # hash keys to UPPERCASE.
    dbproductions = {}
    print("Writing productions ({})".format(len(s7productions)))
    for p in sorted(s7productions):
        dbcursor.execute("INSERT INTO PRODUCTIONS (PRODUCTION) VALUES ('{}')".format(p))
        dbcursor.execute("SELECT @@IDENTITY")
        dbproductions[p.upper()] = dbcursor.fetchone()[0]
    # Now process the incidents etc. that we have retrieved from the S7 database
    print("Writing incidents ({})".format(len(s7incidents)))
    [s7incidents[k].ProcessIncident(dbcursor, dbfacilities, dbproductions) for k in sorted(s7incidents)]
    # Match the new parent incident IDs in the AD1s and then write to the new table. Some
    # really old AD1s don't have the parent incident reference in the REF field, it is just
    # mentioned SOMEWHERE in the commentary. So if the REF field doesn't match then do a
    # re.search (not re.match!) for it. It isn't essential to match these older AD1s with
    # their parent incident, but it is quite useful (and tidy).
    print("Matching and writing AD1s ({})".format(len(s7ad1s)))
    for a in s7ad1s:
        if a.ref in s7incidents:
            a.SetPID(s7incidents[a.ref].dbid)
            a.SetProduction(s7incidents[a.ref].production)
        else:
            z = incre.search(a.commentary)
            if z and z.group() in s7incidents:
                a.SetPID(s7incidents[z.group()].dbid)
                a.SetProduction(s7incidents[z.group()].production)
        a.Process(dbcursor)
    print("Committing changes")
    dbcursor.commit()
finally:
    print("Closing databases")
    dbcxn.close()
    s7cxn.close()
It turns out that the file has additional complications: mangled data needing a degree of processing that is a pain to do in Excel but trivially simple in Python. So I will re-use some Python 2.x scripts which use the XLRD/XLWT libraries to munge the spreadsheet.
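For the record, the date-parsing half of that munging is short with xlrd; a hedged sketch (Python 2, like the script above), where the column index and the format string are guesses based on the sample record:
import xlrd
from datetime import datetime

# Hedged sketch: read the report and parse its text-format timestamps.
# The column index (6) and the strptime format are assumptions taken
# from the sample row in the question.
book = xlrd.open_workbook("report.xls")
sheet = book.sheet_by_index(0)
for rowx in range(1, sheet.nrows):
    raw = sheet.cell_value(rowx, 6)  # e.g. "3/7/2013 7:03:01 PM"
    opened = datetime.strptime(raw, "%m/%d/%Y %I:%M:%S %p")
    print("{} -> {}".format(raw, opened))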